path: root/arch/arm64/include/asm/spinlock.h
author     Will Deacon <will.deacon@arm.com>   2015-11-19 17:48:31 +0000
committer  Will Deacon <will.deacon@arm.com>   2015-12-03 19:37:59 +0000
commit     d86b8da04dfa4771a68bdbad6c424d40f22f0d14 (patch)
tree       67c9c634056117222d7387a06259ba1f807f9ad8 /arch/arm64/include/asm/spinlock.h
parent     arm64: enable HAVE_IRQ_TIME_ACCOUNTING (diff)
arm64: spinlock: serialise spin_unlock_wait against concurrent lockers
Boqun Feng reported a rather nasty ordering issue with spin_unlock_wait
on architectures implementing spin_lock with LL/SC sequences and acquire
semantics:

 | CPU 1                   CPU 2                     CPU 3
 | ==================      ====================      ==============
 |                                                   spin_unlock(&lock);
 |                         spin_lock(&lock):
 |                           r1 = *lock; // r1 == 0;
 |                         o = READ_ONCE(object); // reordered here
 | object = NULL;
 | smp_mb();
 | spin_unlock_wait(&lock);
 |                         *lock = 1;
 | smp_mb();
 | o->dead = true;
 |                         if (o) // true
 |                           BUG_ON(o->dead); // true!!

The crux of the problem is that spin_unlock_wait(&lock) can return on
CPU 1 whilst CPU 2 is in the process of taking the lock. This can be
resolved by upgrading spin_unlock_wait to a LOCK operation, forcing it
to serialise against a concurrent locker and giving it acquire semantics
in the process (although it is not at all clear whether this is needed -
different callers seem to assume different things about the barrier
semantics and architectures are similarly disjoint in their
implementations of the macro).

This patch implements spin_unlock_wait using an LL/SC sequence with
acquire semantics on arm64. For v8.1 systems with the LSE atomics, the
exclusive writeback is omitted, since the spin_lock operation is
indivisible and no intermediate state can be observed.

Signed-off-by: Will Deacon <will.deacon@arm.com>
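For readers who want to experiment with the pattern, the litmus test above
corresponds roughly to the following C sketch. This is a minimal
illustration, not kernel source: the names release(), use() and struct obj
are invented for this example, and release() is assumed to have been handed
a pointer to the object earlier.

/*
 * With the old arch_spin_unlock_wait(), CPU 2's READ_ONCE(object) can
 * be satisfied before its store to the lock word becomes visible, so
 * CPU 1 sees the lock "free", returns from spin_unlock_wait() and
 * marks the object dead while CPU 2 still believes it is live.
 */
#include <linux/spinlock.h>
#include <linux/bug.h>

struct obj { bool dead; };			/* invented for this sketch */

static DEFINE_SPINLOCK(lock);
static struct obj *object;

void release(struct obj *o)			/* CPU 1 */
{
	object = NULL;
	smp_mb();
	spin_unlock_wait(&lock);		/* must order against lockers */
	smp_mb();
	o->dead = true;
}

void use(void)					/* CPU 2 */
{
	struct obj *o;

	spin_lock(&lock);
	o = READ_ONCE(object);
	if (o)
		BUG_ON(o->dead);		/* fires if the ordering fails */
	spin_unlock(&lock);
}

With the old definition, nothing orders CPU 1's spin_unlock_wait() against
CPU 2's store to the lock word, so the BUG_ON() can fire; upgrading
spin_unlock_wait() to a LOCK operation closes the window.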
Diffstat (limited to 'arch/arm64/include/asm/spinlock.h')
 arch/arm64/include/asm/spinlock.h | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index c85e96d174a5..fc9682bfe002 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -26,9 +26,28 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	unsigned int tmp;
+	arch_spinlock_t lockval;
 
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+	asm volatile(
+"	sevl\n"
+"1:	wfe\n"
+"2:	ldaxr	%w0, %2\n"
+"	eor	%w1, %w0, %w0, ror #16\n"
+"	cbnz	%w1, 1b\n"
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+"	stxr	%w1, %w0, %2\n"
+"	cbnz	%w1, 2b\n", /* Serialise against any concurrent lockers */
+	/* LSE atomics */
+"	nop\n"
+"	nop\n")
+	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+	:
+	: "memory");
+}
 
 #define arch_spin_lock_flags(lock, flags)   arch_spin_lock(lock)
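A note on the test performed by the asm above, for readers unfamiliar with
the arm64 ticket-lock layout: the 32-bit lock word packs a 16-bit owner
half and a 16-bit next half, and the lock is free iff the two halves are
equal, which is exactly what the eor against a ror #16 of the same value
computes. A C-level sketch of that check follows (illustrative only: plain
C cannot express the ldaxr/stxr exclusive-monitor serialisation that makes
this a LOCK operation, and lock_is_free() is an invented name):

#include <linux/bitops.h>
#include <linux/types.h>

static inline bool lock_is_free(u32 lockval)
{
	/* XOR with a 16-bit rotation of itself: zero iff owner == next */
	return (lockval ^ ror32(lockval, 16)) == 0;
}

In the LL/SC case the stxr writes the unchanged value back purely so that
the store-exclusive fails, and the loop retries, whenever a concurrent
locker's exclusive store intervenes. With the v8.1 LSE atomics, spin_lock
is a single indivisible instruction, so no half-taken state can be
observed and the writeback is replaced by nops.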