|author||Ingo Molnar <email@example.com>||2017-08-21 09:45:19 +0200|
|committer||Ingo Molnar <firstname.lastname@example.org>||2017-08-21 09:45:19 +0200|
|parent||Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu (diff)|
|parent||Merge branches 'doc.2017.08.17a', 'fixes.2017.08.17a', 'hotplug.2017.07.25b', 'misc.2017.08.17a', 'spin_unlock_wait_no.2017.08.17a', 'srcu.2017.07.27c' and 'torture.2017.07.24c' into HEAD (diff)|
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:

 - Removal of spin_unlock_wait()
 - SRCU updates
 - Torture-test updates
 - Documentation updates
 - Miscellaneous fixes
 - CPU-hotplug fixes
 - Miscellaneous non-RCU fixes

Signed-off-by: Ingo Molnar <email@example.com>
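The headline change in this pull is the removal of spin_unlock_wait(); its callers were converted to lock/unlock pairs elsewhere in the series. A minimal sketch of that conversion, assuming an invented demo_lock and helper names not taken from this diff:

```c
#include <linux/spinlock.h>

/* demo_lock and both helpers are invented for illustration. */
static DEFINE_SPINLOCK(demo_lock);

/* Before this series: wait for any current holder to drop the lock. */
static void drain_old(void)
{
	spin_unlock_wait(&demo_lock);	/* API removed by this merge */
}

/*
 * After: an empty lock/unlock critical section gives the same
 * "no prior holder still inside" guarantee, with well-defined
 * acquire/release ordering instead of the subtle semantics that
 * made spin_unlock_wait() hard to use correctly.
 */
static void drain_new(void)
{
	spin_lock(&demo_lock);
	spin_unlock(&demo_lock);
}
```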
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/spinlock.h	| 58 ++------
1 files changed, 5 insertions, 53 deletions
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cae331d553f8..f445bd7f2b9f 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -26,58 +26,6 @@
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
  */
 
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	unsigned int tmp;
-	arch_spinlock_t lockval;
-	u32 owner;
-
-	/*
-	 * Ensure prior spin_lock operations to other locks have completed
-	 * on this CPU before we test whether "lock" is locked.
-	 */
-	smp_mb();
-	owner = READ_ONCE(lock->owner) << 16;
-
-	asm volatile(
-"	sevl\n"
-"1:	wfe\n"
-"2:	ldaxr	%w0, %2\n"
-	/* Is the lock free? */
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbz	%w1, 3f\n"
-	/* Lock taken -- has there been a subsequent unlock->lock transition? */
-"	eor	%w1, %w3, %w0, lsl #16\n"
-"	cbz	%w1, 1b\n"
-	/*
-	 * The owner has been updated, so there was an unlock->lock
-	 * transition that we missed. That means we can rely on the
-	 * store-release of the unlock operation paired with the
-	 * load-acquire of the lock operation to publish any of our
-	 * previous stores to the new lock owner and therefore don't
-	 * need to bother with the writeback below.
-	 */
-"	b	4f\n"
-"3:\n"
-	/*
-	 * Serialise against any concurrent lockers by writing back the
-	 * unlocked lock value
-	 */
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-"	stxr	%w1, %w0, %2\n"
-	__nops(2),
-	/* LSE atomics */
-"	mov	%w1, %w0\n"
-"	cas	%w0, %w0, %2\n"
-"	eor	%w1, %w1, %w0\n")
-	/* Somebody else wrote to the lock, GOTO 10 and reload the value */
-"	cbnz	%w1, 2b\n"
-"4:"
-	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-	: "r" (owner)
-	: "memory");
-}
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
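For readers decoding the deleted asm: arm64 spinlocks of this era are ticket locks whose 32-bit word packs a 16-bit owner half and a 16-bit next half, and the lock is free exactly when the two halves are equal (the file's arch_spin_value_unlocked() is simply lock.owner == lock.next). A userspace sketch of the rotate-and-XOR trick behind the deleted "eor %w1, %w0, %w0, ror #16" free-lock test, with invented demo_* names:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Invented demo type mirroring the little-endian arch_spinlock_t
 * layout (big-endian kernels swap the two halves).
 */
typedef struct {
	uint16_t owner;	/* ticket currently being served */
	uint16_t next;	/* next ticket to be handed out */
} demo_spinlock_t;

/*
 * C rendering of "eor %w1, %w0, %w0, ror #16": rotating the 32-bit
 * lock word by 16 bits swaps the owner and next halves, so the XOR
 * is zero exactly when owner == next, i.e. when the lock is free.
 */
static int demo_value_unlocked(demo_spinlock_t lock)
{
	uint32_t v;

	memcpy(&v, &lock, sizeof(v));
	return (v ^ ((v >> 16) | (v << 16))) == 0;
}

int main(void)
{
	demo_spinlock_t free_lock = { .owner = 3, .next = 3 };
	demo_spinlock_t held_lock = { .owner = 3, .next = 4 };

	printf("free=%d held=%d\n",
	       demo_value_unlocked(free_lock),	/* prints 1 */
	       demo_value_unlocked(held_lock));	/* prints 0 */
	return 0;
}
```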
@@ -176,7 +124,11 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	smp_mb(); /* See arch_spin_unlock_wait */
+	/*
+	 * Ensure prior spin_lock operations to other locks have completed
+	 * on this CPU before we test whether "lock" is locked.
+	 */
+	smp_mb(); /* ^^^ */
 	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
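The second hunk keeps the full barrier but moves its justification inline, since the arch_spin_unlock_wait() comment it used to point at is gone. The barrier matters because spin_is_locked() is sometimes used in cross-lock handshakes where a CPU takes its own lock and then peeks at a peer's. A hedged sketch of that pattern, with invented locks and helpers:

```c
#include <linux/spinlock.h>
#include <linux/printk.h>

/* Both locks and both helpers are invented for illustration. */
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void peer_a(void)	/* runs on one CPU */
{
	spin_lock(&lock_a);
	/*
	 * The smp_mb() in arch_spin_is_locked() orders the lock
	 * acquisition above before the read of lock_b; without it,
	 * peer_a() and peer_b() could each observe the other lock
	 * as free and both take their fast paths at once.
	 */
	if (!spin_is_locked(&lock_b))
		pr_debug("fast path: no peer inside lock_b\n");
	spin_unlock(&lock_a);
}

static void peer_b(void)	/* mirror image on another CPU */
{
	spin_lock(&lock_b);
	if (!spin_is_locked(&lock_a))
		pr_debug("fast path: no peer inside lock_a\n");
	spin_unlock(&lock_b);
}
```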