From 625e88be1f41b53cec55827c984e4a89ea8ee9f9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:16 +0100 Subject: locking/qspinlock: Merge 'struct __qspinlock' into 'struct qspinlock' 'struct __qspinlock' provides a handy union of fields so that subcomponents of the lockword can be accessed by name, without having to manage shifts and masks explicitly and take endianness into account. This is useful in qspinlock.h and also potentially in arch headers, so move the 'struct __qspinlock' into 'struct qspinlock' and kill the extra definition. Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Acked-by: Boqun Feng Cc: Linus Torvalds Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-3-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 46 +++---------------------------------- kernel/locking/qspinlock_paravirt.h | 34 +++++++++------------------ 2 files changed, 14 insertions(+), 66 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index d880296245c5..f5b0e59f6d14 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -114,40 +114,6 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail) #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK) -/* - * By using the whole 2nd least significant byte for the pending bit, we - * can allow better optimization of the lock acquisition for the pending - * bit holder. - * - * This internal structure is also used by the set_locked function which - * is not restricted to _Q_PENDING_BITS == 8. - */ -struct __qspinlock { - union { - atomic_t val; -#ifdef __LITTLE_ENDIAN - struct { - u8 locked; - u8 pending; - }; - struct { - u16 locked_pending; - u16 tail; - }; -#else - struct { - u16 tail; - u16 locked_pending; - }; - struct { - u8 reserved[2]; - u8 pending; - u8 locked; - }; -#endif - }; -}; - #if _Q_PENDING_BITS == 8 /** * clear_pending_set_locked - take ownership and clear the pending bit. @@ -159,9 +125,7 @@ struct __qspinlock { */ static __always_inline void clear_pending_set_locked(struct qspinlock *lock) { - struct __qspinlock *l = (void *)lock; - - WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL); + WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL); } /* @@ -176,13 +140,11 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock) */ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) { - struct __qspinlock *l = (void *)lock; - /* * Use release semantics to make sure that the MCS node is properly * initialized before changing the tail code. 
*/ - return (u32)xchg_release(&l->tail, + return (u32)xchg_release(&lock->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET; } @@ -237,9 +199,7 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) */ static __always_inline void set_locked(struct qspinlock *lock) { - struct __qspinlock *l = (void *)lock; - - WRITE_ONCE(l->locked, _Q_LOCKED_VAL); + WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); } diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 6ee477765e6c..2711940429f5 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -87,8 +87,6 @@ struct pv_node { #define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l) static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) { - struct __qspinlock *l = (void *)lock; - /* * Stay in unfair lock mode as long as queued mode waiters are * present in the MCS wait queue but the pending bit isn't set. @@ -97,7 +95,7 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) int val = atomic_read(&lock->val); if (!(val & _Q_LOCKED_PENDING_MASK) && - (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) { + (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) { qstat_inc(qstat_pv_lock_stealing, true); return true; } @@ -117,16 +115,12 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) #if _Q_PENDING_BITS == 8 static __always_inline void set_pending(struct qspinlock *lock) { - struct __qspinlock *l = (void *)lock; - - WRITE_ONCE(l->pending, 1); + WRITE_ONCE(lock->pending, 1); } static __always_inline void clear_pending(struct qspinlock *lock) { - struct __qspinlock *l = (void *)lock; - - WRITE_ONCE(l->pending, 0); + WRITE_ONCE(lock->pending, 0); } /* @@ -136,10 +130,8 @@ static __always_inline void clear_pending(struct qspinlock *lock) */ static __always_inline int trylock_clear_pending(struct qspinlock *lock) { - struct __qspinlock *l = (void *)lock; - - return !READ_ONCE(l->locked) && - (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL, + return !READ_ONCE(lock->locked) && + (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL) == _Q_PENDING_VAL); } #else /* _Q_PENDING_BITS == 8 */ @@ -384,7 +376,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) { struct pv_node *pn = (struct pv_node *)node; - struct __qspinlock *l = (void *)lock; /* * If the vCPU is indeed halted, advance its state to match that of @@ -413,7 +404,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) * the hash table later on at unlock time, no atomic instruction is * needed. */ - WRITE_ONCE(l->locked, _Q_SLOW_VAL); + WRITE_ONCE(lock->locked, _Q_SLOW_VAL); (void)pv_hash(lock, pn); } @@ -428,7 +419,6 @@ static u32 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) { struct pv_node *pn = (struct pv_node *)node; - struct __qspinlock *l = (void *)lock; struct qspinlock **lp = NULL; int waitcnt = 0; int loop; @@ -479,13 +469,13 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) * * Matches the smp_rmb() in __pv_queued_spin_unlock(). */ - if (xchg(&l->locked, _Q_SLOW_VAL) == 0) { + if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) { /* * The lock was free and now we own the lock. * Change the lock value back to _Q_LOCKED_VAL * and unhash the table. 
*/ - WRITE_ONCE(l->locked, _Q_LOCKED_VAL); + WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); WRITE_ONCE(*lp, NULL); goto gotlock; } @@ -493,7 +483,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) WRITE_ONCE(pn->state, vcpu_hashed); qstat_inc(qstat_pv_wait_head, true); qstat_inc(qstat_pv_wait_again, waitcnt); - pv_wait(&l->locked, _Q_SLOW_VAL); + pv_wait(&lock->locked, _Q_SLOW_VAL); /* * Because of lock stealing, the queue head vCPU may not be @@ -518,7 +508,6 @@ gotlock: __visible void __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked) { - struct __qspinlock *l = (void *)lock; struct pv_node *node; if (unlikely(locked != _Q_SLOW_VAL)) { @@ -547,7 +536,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked) * Now that we have a reference to the (likely) blocked pv_node, * release the lock. */ - smp_store_release(&l->locked, 0); + smp_store_release(&lock->locked, 0); /* * At this point the memory pointed at by lock can be freed/reused, @@ -573,7 +562,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked) #ifndef __pv_queued_spin_unlock __visible void __pv_queued_spin_unlock(struct qspinlock *lock) { - struct __qspinlock *l = (void *)lock; u8 locked; /* @@ -581,7 +569,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock) * unhash. Otherwise it would be possible to have multiple @lock * entries, which would be BAD. */ - locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0); + locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0); if (likely(locked == _Q_LOCKED_VAL)) return; -- cgit v1.2.3-59-g8ed1b From 6512276d97b160d90b53285bd06f7f201459a7e3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:17 +0100 Subject: locking/qspinlock: Bound spinning on pending->locked transition in slowpath If a locker taking the qspinlock slowpath reads a lock value indicating that only the pending bit is set, then it will spin whilst the concurrent pending->locked transition takes effect. Unfortunately, there is no guarantee that such a transition will ever be observed since concurrent lockers could continuously set pending and hand over the lock amongst themselves, leading to starvation. Whilst this would probably resolve in practice, it means that it is not possible to prove liveness properties about the lock and means that lock acquisition time is unbounded. Rather than removing the pending->locked spinning from the slowpath altogether (which has been shown to heavily penalise a 2-threaded locking stress test on x86), this patch replaces the explicit spinning with a call to atomic_cond_read_relaxed and allows the architecture to provide a bound on the number of spins. For architectures that can respond to changes in cacheline state in their smp_cond_load implementation, it should be sufficient to use the default bound of 1. 
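The bounded wait described above can be modelled with a small, self-contained C11 sketch; this is a userspace analogue of the idiom (re-read the lock word while it still shows the pending-only value, but only a fixed number of times), not the kernel code, and the names PENDING_LOOPS and bounded_pending_wait() are invented for the example, with PENDING_LOOPS playing the role of _Q_PENDING_LOOPS:

    #include <stdatomic.h>
    #include <stdint.h>

    #define PENDING_LOOPS 1    /* plays the role of _Q_PENDING_LOOPS */

    /*
     * Re-read *lock while it still holds pending_val, but only a bounded
     * number of times, so the caller makes forward progress even if other
     * threads keep re-creating the pending-only state.
     */
    static uint32_t bounded_pending_wait(_Atomic uint32_t *lock, uint32_t pending_val)
    {
        int cnt = PENDING_LOOPS;
        uint32_t val;

        do {
            val = atomic_load_explicit(lock, memory_order_relaxed);
        } while (val == pending_val && cnt-- > 0);

        return val;
    }

With the default bound of 1 this permits a single re-read beyond the first before the caller carries on with the rest of the slowpath, whether or not the hand-over has completed.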
Suggested-by: Waiman Long Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-4-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index f5b0e59f6d14..a0f7976348f8 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -76,6 +76,18 @@ #define MAX_NODES 4 #endif +/* + * The pending bit spinning loop count. + * This heuristic is used to limit the number of lockword accesses + * made by atomic_cond_read_relaxed when waiting for the lock to + * transition out of the "== _Q_PENDING_VAL" state. We don't spin + * indefinitely because there's no guarantee that we'll make forward + * progress. + */ +#ifndef _Q_PENDING_LOOPS +#define _Q_PENDING_LOOPS 1 +#endif + /* * Per-CPU queue node structures; we can never have more than 4 nested * contexts: task, softirq, hardirq, nmi. @@ -266,13 +278,15 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) return; /* - * wait for in-progress pending->locked hand-overs + * Wait for in-progress pending->locked hand-overs with a bounded + * number of spins so that we guarantee forward progress. * * 0,1,0 -> 0,0,1 */ if (val == _Q_PENDING_VAL) { - while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL) - cpu_relax(); + int cnt = _Q_PENDING_LOOPS; + val = atomic_cond_read_relaxed(&lock->val, + (VAL != _Q_PENDING_VAL) || !cnt--); } /* -- cgit v1.2.3-59-g8ed1b From 59fb586b4a07b4e1a0ee577140ab4842ba451acd Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:19 +0100 Subject: locking/qspinlock: Remove unbounded cmpxchg() loop from locking slowpath The qspinlock locking slowpath utilises a "pending" bit as a simple form of an embedded test-and-set lock that can avoid the overhead of explicit queuing in cases where the lock is held but uncontended. This bit is managed using a cmpxchg() loop which tries to transition the uncontended lock word from (0,0,0) -> (0,0,1) or (0,0,1) -> (0,1,1). Unfortunately, the cmpxchg() loop is unbounded and lockers can be starved indefinitely if the lock word is seen to oscillate between unlocked (0,0,0) and locked (0,0,1). This could happen if concurrent lockers are able to take the lock in the cmpxchg() loop without queuing and pass it around amongst themselves. This patch fixes the problem by unconditionally setting _Q_PENDING_VAL using atomic_fetch_or, and then inspecting the old value to see whether we need to spin on the current lock owner, or whether we now effectively hold the lock. The tricky scenario is when concurrent lockers end up queuing on the lock and the lock becomes available, causing us to see a lockword of (n,0,0). With pending now set, simply queuing could lead to deadlock as the head of the queue may not have observed the pending flag being cleared. Conversely, if the head of the queue did observe pending being cleared, then it could transition the lock from (n,0,0) -> (0,0,1) meaning that any attempt to "undo" our setting of the pending bit could race with a concurrent locker trying to set it. 
We handle this race by preserving the pending bit when taking the lock after reaching the head of the queue and leaving the tail entry intact if we saw pending set, because we know that the tail is going to be updated shortly. Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-6-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 102 ++++++++++++++++++++---------------- kernel/locking/qspinlock_paravirt.h | 5 -- 2 files changed, 58 insertions(+), 49 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index a0f7976348f8..e06f67e021d9 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -127,6 +127,17 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail) #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK) #if _Q_PENDING_BITS == 8 +/** + * clear_pending - clear the pending bit. + * @lock: Pointer to queued spinlock structure + * + * *,1,* -> *,0,* + */ +static __always_inline void clear_pending(struct qspinlock *lock) +{ + WRITE_ONCE(lock->pending, 0); +} + /** * clear_pending_set_locked - take ownership and clear the pending bit. * @lock: Pointer to queued spinlock structure @@ -162,6 +173,17 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) #else /* _Q_PENDING_BITS == 8 */ +/** + * clear_pending - clear the pending bit. + * @lock: Pointer to queued spinlock structure + * + * *,1,* -> *,0,* + */ +static __always_inline void clear_pending(struct qspinlock *lock) +{ + atomic_andnot(_Q_PENDING_VAL, &lock->val); +} + /** * clear_pending_set_locked - take ownership and clear the pending bit. * @lock: Pointer to queued spinlock structure @@ -266,7 +288,7 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) { struct mcs_spinlock *prev, *next, *node; - u32 new, old, tail; + u32 old, tail; int idx; BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); @@ -289,59 +311,51 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) (VAL != _Q_PENDING_VAL) || !cnt--); } + /* + * If we observe any contention; queue. + */ + if (val & ~_Q_LOCKED_MASK) + goto queue; + /* * trylock || pending * * 0,0,0 -> 0,0,1 ; trylock * 0,0,1 -> 0,1,1 ; pending */ - for (;;) { + val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val); + if (!(val & ~_Q_LOCKED_MASK)) { /* - * If we observe any contention; queue. + * We're pending, wait for the owner to go away. + * + * *,1,1 -> *,1,0 + * + * this wait loop must be a load-acquire such that we match the + * store-release that clears the locked bit and create lock + * sequentiality; this is because not all + * clear_pending_set_locked() implementations imply full + * barriers. */ - if (val & ~_Q_LOCKED_MASK) - goto queue; - - new = _Q_LOCKED_VAL; - if (val == new) - new |= _Q_PENDING_VAL; + if (val & _Q_LOCKED_MASK) { + smp_cond_load_acquire(&lock->val.counter, + !(VAL & _Q_LOCKED_MASK)); + } /* - * Acquire semantic is required here as the function may - * return immediately if the lock was free. + * take ownership and clear the pending bit. 
+ * + * *,1,0 -> *,0,1 */ - old = atomic_cmpxchg_acquire(&lock->val, val, new); - if (old == val) - break; - - val = old; - } - - /* - * we won the trylock - */ - if (new == _Q_LOCKED_VAL) + clear_pending_set_locked(lock); return; + } /* - * we're pending, wait for the owner to go away. - * - * *,1,1 -> *,1,0 - * - * this wait loop must be a load-acquire such that we match the - * store-release that clears the locked bit and create lock - * sequentiality; this is because not all clear_pending_set_locked() - * implementations imply full barriers. - */ - smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK)); - - /* - * take ownership and clear the pending bit. - * - * *,1,0 -> *,0,1 + * If pending was clear but there are waiters in the queue, then + * we need to undo our setting of pending before we queue ourselves. */ - clear_pending_set_locked(lock); - return; + if (!(val & _Q_PENDING_MASK)) + clear_pending(lock); /* * End of pending bit optimistic spinning and beginning of MCS @@ -445,15 +459,15 @@ locked: * claim the lock: * * n,0,0 -> 0,0,1 : lock, uncontended - * *,0,0 -> *,0,1 : lock, contended + * *,*,0 -> *,*,1 : lock, contended * - * If the queue head is the only one in the queue (lock value == tail), - * clear the tail code and grab the lock. Otherwise, we only need - * to grab the lock. + * If the queue head is the only one in the queue (lock value == tail) + * and nobody is pending, clear the tail code and grab the lock. + * Otherwise, we only need to grab the lock. */ for (;;) { /* In the PV case we might already have _Q_LOCKED_VAL set */ - if ((val & _Q_TAIL_MASK) != tail) { + if ((val & _Q_TAIL_MASK) != tail || (val & _Q_PENDING_MASK)) { set_locked(lock); break; } diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 2711940429f5..2dbad2f25480 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -118,11 +118,6 @@ static __always_inline void set_pending(struct qspinlock *lock) WRITE_ONCE(lock->pending, 1); } -static __always_inline void clear_pending(struct qspinlock *lock) -{ - WRITE_ONCE(lock->pending, 0); -} - /* * The pending bit check in pv_queued_spin_steal_lock() isn't a memory * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the -- cgit v1.2.3-59-g8ed1b From c61da58d8a9ba9238250a548f00826eaf44af0f7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:20 +0100 Subject: locking/qspinlock: Kill cmpxchg() loop when claiming lock from head of queue When a queued locker reaches the head of the queue, it claims the lock by setting _Q_LOCKED_VAL in the lockword. If there isn't contention, it must also clear the tail as part of this operation so that subsequent lockers can avoid taking the slowpath altogether. Currently this is expressed as a cmpxchg() loop that practically only runs up to two iterations. This is confusing to the reader and unhelpful to the compiler. Rewrite the cmpxchg() loop without the loop, so that a failed cmpxchg() implies that there is contention and we just need to write to _Q_LOCKED_VAL without considering the rest of the lockword. 
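The reshaped claim step can be pictured with a self-contained C11 sketch: one compare-and-swap attempt that clears the tail when we are the only waiter, otherwise a plain set of the locked bit. This is an analogue rather than the kernel code; LOCKED_VAL, TAIL_MASK and claim_from_queue_head() are illustrative names, and the final fetch-or stands in for the kernel's byte-sized set_locked() store:

    #include <stdatomic.h>
    #include <stdint.h>

    #define LOCKED_VAL  1U             /* plays the role of _Q_LOCKED_VAL */
    #define TAIL_MASK   0xffff0000U    /* plays the role of _Q_TAIL_MASK */

    /*
     * Claim the lock from the head of the queue.  If the lock word still
     * encodes only our own tail, a single CAS clears the tail and takes
     * the lock.  If the CAS fails, or contention was already visible,
     * somebody queued behind us (or set pending), so only the locked bit
     * is set and the rest of the word is left alone.
     */
    static void claim_from_queue_head(_Atomic uint32_t *lock, uint32_t val, uint32_t tail)
    {
        if ((val & TAIL_MASK) == tail &&
            atomic_compare_exchange_strong_explicit(lock, &val, LOCKED_VAL,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed))
            return;    /* no contention: tail cleared, lock taken */

        atomic_fetch_or_explicit(lock, LOCKED_VAL, memory_order_relaxed);
    }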
Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-7-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index e06f67e021d9..b51494a50b1e 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -465,24 +465,21 @@ locked: * and nobody is pending, clear the tail code and grab the lock. * Otherwise, we only need to grab the lock. */ - for (;;) { - /* In the PV case we might already have _Q_LOCKED_VAL set */ - if ((val & _Q_TAIL_MASK) != tail || (val & _Q_PENDING_MASK)) { - set_locked(lock); - break; - } + + /* In the PV case we might already have _Q_LOCKED_VAL set */ + if ((val & _Q_TAIL_MASK) == tail) { /* * The smp_cond_load_acquire() call above has provided the - * necessary acquire semantics required for locking. At most - * two iterations of this loop may be ran. + * necessary acquire semantics required for locking. */ old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL); if (old == val) - goto release; /* No contention */ - - val = old; + goto release; /* No contention */ } + /* Either somebody is queued behind us or _Q_PENDING_VAL is set */ + set_locked(lock); + /* * contended path; wait for next if not observed yet, release. */ -- cgit v1.2.3-59-g8ed1b From f9c811fac48cfbbfb452b08d1042386947868d07 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:21 +0100 Subject: locking/qspinlock: Use atomic_cond_read_acquire() Rather than dig into the counter field of the atomic_t inside the qspinlock structure so that we can call smp_cond_load_acquire(), use atomic_cond_read_acquire() instead, which operates on the atomic_t directly. Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-8-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index b51494a50b1e..56af1fa9874d 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -337,8 +337,8 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * barriers. */ if (val & _Q_LOCKED_MASK) { - smp_cond_load_acquire(&lock->val.counter, - !(VAL & _Q_LOCKED_MASK)); + atomic_cond_read_acquire(&lock->val, + !(VAL & _Q_LOCKED_MASK)); } /* @@ -441,8 +441,8 @@ queue: * * The PV pv_wait_head_or_lock function, if active, will acquire * the lock and return a non-zero value. So we have to skip the - * smp_cond_load_acquire() call. As the next PV queue head hasn't been - * designated yet, there is no way for the locked value to become + * atomic_cond_read_acquire() call. As the next PV queue head hasn't + * been designated yet, there is no way for the locked value to become * _Q_SLOW_VAL. So both the set_locked() and the * atomic_cmpxchg_relaxed() calls will be safe. 
* @@ -452,7 +452,7 @@ queue: if ((val = pv_wait_head_or_lock(lock, node))) goto locked; - val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK)); + val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK)); locked: /* @@ -469,7 +469,7 @@ locked: /* In the PV case we might already have _Q_LOCKED_VAL set */ if ((val & _Q_TAIL_MASK) == tail) { /* - * The smp_cond_load_acquire() call above has provided the + * The atomic_cond_read_acquire() call above has provided the * necessary acquire semantics required for locking. */ old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL); -- cgit v1.2.3-59-g8ed1b From 7f56b58a92aaf2cab049f32a19af7cc57a3972f2 Mon Sep 17 00:00:00 2001 From: Jason Low Date: Thu, 26 Apr 2018 11:34:22 +0100 Subject: locking/mcs: Use smp_cond_load_acquire() in MCS spin loop For qspinlocks on ARM64, we would like to use WFE instead of purely spinning. Qspinlocks internally have lock contenders spin on an MCS lock. Update arch_mcs_spin_lock_contended() such that it uses the new smp_cond_load_acquire() so that ARM64 can also override this spin loop with its own implementation using WFE. On x86, this can also be cheaper than spinning on smp_load_acquire(). Signed-off-by: Jason Low Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-9-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/mcs_spinlock.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index f046b7ce9dd6..5e10153b4d3c 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -23,13 +23,15 @@ struct mcs_spinlock { #ifndef arch_mcs_spin_lock_contended /* - * Using smp_load_acquire() provides a memory barrier that ensures - * subsequent operations happen after the lock is acquired. + * Using smp_cond_load_acquire() provides the acquire semantics + * required so that subsequent operations happen after the + * lock is acquired. Additionally, some architectures such as + * ARM64 would like to do spin-waiting instead of purely + * spinning, and smp_cond_load_acquire() provides that behavior. */ #define arch_mcs_spin_lock_contended(l) \ do { \ - while (!(smp_load_acquire(l))) \ - cpu_relax(); \ + smp_cond_load_acquire(l, VAL); \ } while (0) #endif -- cgit v1.2.3-59-g8ed1b From c131a198c497db436b558ac5e9a140cdcb91b304 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:23 +0100 Subject: locking/qspinlock: Use smp_cond_load_relaxed() to wait for next node When a locker reaches the head of the queue and takes the lock, a concurrent locker may enqueue and force the lock holder to spin whilst its node->next field is initialised. Rather than open-code a READ_ONCE/cpu_relax() loop, this can be implemented using smp_cond_load_relaxed() instead. 
Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-10-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 56af1fa9874d..d6c3b029bd93 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -483,10 +483,8 @@ locked: /* * contended path; wait for next if not observed yet, release. */ - if (!next) { - while (!(next = READ_ONCE(node->next))) - cpu_relax(); - } + if (!next) + next = smp_cond_load_relaxed(&node->next, (VAL)); arch_mcs_spin_unlock_contended(&next->locked); pv_kick_node(lock, next); -- cgit v1.2.3-59-g8ed1b From 9d4646d14d51d62b967a12452c30ea7edf8dd8fa Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:25 +0100 Subject: locking/qspinlock: Elide back-to-back RELEASE operations with smp_wmb() The qspinlock slowpath must ensure that the MCS node is fully initialised before it can be reached by another other CPU. This is currently enforced by using a RELEASE operation when updating the tail and also when linking the node into the waitqueue, since the control dependency off xchg_tail is insufficient to enforce sufficient ordering, see: 95bcade33a8a ("locking/qspinlock: Ensure node is initialised before updating prev->next") Back-to-back RELEASE operations may be expensive on some architectures, particularly those that implement them using fences under the hood. We can replace the two RELEASE operations with a single smp_wmb() fence and use RELAXED operations for the subsequent publishing of the node. Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-12-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index d6c3b029bd93..956a12983bd0 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -164,10 +164,10 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock) static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) { /* - * Use release semantics to make sure that the MCS node is properly - * initialized before changing the tail code. + * We can use relaxed semantics since the caller ensures that the + * MCS node is properly initialized before updating the tail. */ - return (u32)xchg_release(&lock->tail, + return (u32)xchg_relaxed(&lock->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET; } @@ -212,10 +212,11 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) for (;;) { new = (val & _Q_LOCKED_PENDING_MASK) | tail; /* - * Use release semantics to make sure that the MCS node is - * properly initialized before changing the tail code. + * We can use relaxed semantics since the caller ensures that + * the MCS node is properly initialized before updating the + * tail. 
*/ - old = atomic_cmpxchg_release(&lock->val, val, new); + old = atomic_cmpxchg_relaxed(&lock->val, val, new); if (old == val) break; @@ -388,12 +389,18 @@ queue: goto release; /* + * Ensure that the initialisation of @node is complete before we + * publish the updated tail via xchg_tail() and potentially link + * @node into the waitqueue via WRITE_ONCE(prev->next, node) below. + */ + smp_wmb(); + + /* + * Publish the updated tail. * We have already touched the queueing cacheline; don't bother with * pending stuff. * * p,*,* -> n,*,* - * - * RELEASE, such that the stores to @node must be complete. */ old = xchg_tail(lock, tail); next = NULL; @@ -405,14 +412,8 @@ queue: if (old & _Q_TAIL_MASK) { prev = decode_tail(old); - /* - * We must ensure that the stores to @node are observed before - * the write to prev->next. The address dependency from - * xchg_tail is not sufficient to ensure this because the read - * component of xchg_tail is unordered with respect to the - * initialisation of @node. - */ - smp_store_release(&prev->next, node); + /* Link @node into the waitqueue. */ + WRITE_ONCE(prev->next, node); pv_wait_node(node, prev); arch_mcs_spin_lock_contended(&node->locked); -- cgit v1.2.3-59-g8ed1b From ae75d9089ff7095d1d1a12c3cd86b21d3eaf3b15 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 26 Apr 2018 11:34:26 +0100 Subject: locking/qspinlock: Use try_cmpxchg() instead of cmpxchg() when locking When reaching the head of an uncontended queue on the qspinlock slow-path, using a try_cmpxchg() instead of a cmpxchg() operation to transition the lock work to _Q_LOCKED_VAL generates slightly better code for x86 and pretty much identical code for arm64. Reported-by: Peter Zijlstra Signed-off-by: Will Deacon Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1524738868-31318-13-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 956a12983bd0..46813185957b 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -467,16 +467,15 @@ locked: * Otherwise, we only need to grab the lock. */ - /* In the PV case we might already have _Q_LOCKED_VAL set */ - if ((val & _Q_TAIL_MASK) == tail) { - /* - * The atomic_cond_read_acquire() call above has provided the - * necessary acquire semantics required for locking. - */ - old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL); - if (old == val) - goto release; /* No contention */ - } + /* + * In the PV case we might already have _Q_LOCKED_VAL set. + * + * The atomic_cond_read_acquire() call above has provided the + * necessary acquire semantics required for locking. + */ + if (((val & _Q_TAIL_MASK) == tail) && + atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) + goto release; /* No contention */ /* Either somebody is queued behind us or _Q_PENDING_VAL is set */ set_locked(lock); -- cgit v1.2.3-59-g8ed1b From 81d3dc9a349b1e61d77106bbb05a6e6dd29b9d5e Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 26 Apr 2018 11:34:27 +0100 Subject: locking/qspinlock: Add stat tracking for pending vs. slowpath Currently, the qspinlock_stat code tracks only statistical counts in the PV qspinlock code. 
However, it may also be useful to track the number of locking operations done via the pending code vs. the MCS lock queue slowpath for the non-PV case. The qspinlock stat code is modified to do that. The stat counter pv_lock_slowpath is renamed to lock_slowpath so that it can be used by both the PV and non-PV cases. Signed-off-by: Waiman Long Acked-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Linus Torvalds Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Cc: will.deacon@arm.com Link: http://lkml.kernel.org/r/1524738868-31318-14-git-send-email-will.deacon@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock.c | 14 +++++++++++--- kernel/locking/qspinlock_paravirt.h | 7 +------ kernel/locking/qspinlock_stat.h | 9 ++++++--- 3 files changed, 18 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 46813185957b..bfaeb05123ff 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -12,11 +12,11 @@ * GNU General Public License for more details. * * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P. - * (C) Copyright 2013-2014 Red Hat, Inc. + * (C) Copyright 2013-2014,2018 Red Hat, Inc. * (C) Copyright 2015 Intel Corp. * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP * - * Authors: Waiman Long + * Authors: Waiman Long * Peter Zijlstra */ @@ -32,6 +32,11 @@ #include #include +/* + * Include queued spinlock statistics code + */ +#include "qspinlock_stat.h" + /* * The basic principle of a queue-based spinlock can best be understood * by studying a classic queue-based spinlock implementation called the @@ -295,7 +300,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS)); if (pv_enabled()) - goto queue; + goto pv_queue; if (virt_spin_lock(lock)) return; @@ -348,6 +353,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * *,1,0 -> *,0,1 */ clear_pending_set_locked(lock); + qstat_inc(qstat_lock_pending, true); return; } @@ -363,6 +369,8 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * queuing. 
*/ queue: + qstat_inc(qstat_lock_slowpath, true); +pv_queue: node = this_cpu_ptr(&mcs_nodes[0]); idx = node->count++; tail = encode_tail(smp_processor_id(), idx); diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 2dbad2f25480..25730b2ac022 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -55,11 +55,6 @@ struct pv_node { u8 state; }; -/* - * Include queued spinlock statistics code - */ -#include "qspinlock_stat.h" - /* * Hybrid PV queued/unfair lock * @@ -428,7 +423,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) /* * Tracking # of slowpath locking operations */ - qstat_inc(qstat_pv_lock_slowpath, true); + qstat_inc(qstat_lock_slowpath, true); for (;; waitcnt++) { /* diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h index 4a30ef63c607..6bd78c0740fc 100644 --- a/kernel/locking/qspinlock_stat.h +++ b/kernel/locking/qspinlock_stat.h @@ -22,13 +22,14 @@ * pv_kick_wake - # of vCPU kicks used for computing pv_latency_wake * pv_latency_kick - average latency (ns) of vCPU kick operation * pv_latency_wake - average latency (ns) from vCPU kick to wakeup - * pv_lock_slowpath - # of locking operations via the slowpath * pv_lock_stealing - # of lock stealing operations * pv_spurious_wakeup - # of spurious wakeups in non-head vCPUs * pv_wait_again - # of wait's after a queue head vCPU kick * pv_wait_early - # of early vCPU wait's * pv_wait_head - # of vCPU wait's at the queue head * pv_wait_node - # of vCPU wait's at a non-head queue node + * lock_pending - # of locking operations via pending code + * lock_slowpath - # of locking operations via MCS lock queue * * Writing to the "reset_counters" file will reset all the above counter * values. @@ -46,13 +47,14 @@ enum qlock_stats { qstat_pv_kick_wake, qstat_pv_latency_kick, qstat_pv_latency_wake, - qstat_pv_lock_slowpath, qstat_pv_lock_stealing, qstat_pv_spurious_wakeup, qstat_pv_wait_again, qstat_pv_wait_early, qstat_pv_wait_head, qstat_pv_wait_node, + qstat_lock_pending, + qstat_lock_slowpath, qstat_num, /* Total number of statistical counters */ qstat_reset_cnts = qstat_num, }; @@ -73,12 +75,13 @@ static const char * const qstat_names[qstat_num + 1] = { [qstat_pv_spurious_wakeup] = "pv_spurious_wakeup", [qstat_pv_latency_kick] = "pv_latency_kick", [qstat_pv_latency_wake] = "pv_latency_wake", - [qstat_pv_lock_slowpath] = "pv_lock_slowpath", [qstat_pv_lock_stealing] = "pv_lock_stealing", [qstat_pv_wait_again] = "pv_wait_again", [qstat_pv_wait_early] = "pv_wait_early", [qstat_pv_wait_head] = "pv_wait_head", [qstat_pv_wait_node] = "pv_wait_node", + [qstat_lock_pending] = "lock_pending", + [qstat_lock_slowpath] = "lock_slowpath", [qstat_reset_cnts] = "reset_counters", }; -- cgit v1.2.3-59-g8ed1b From 3bea9adc96842b8a7345c7fb202c16ae9c8d5b25 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 27 Apr 2018 10:40:13 +0100 Subject: locking/qspinlock: Remove duplicate clear_pending() function from PV code The native clear_pending() function is identical to the PV version, so the latter can simply be removed. This fixes the build for systems with >= 16K CPUs using the PV lock implementation. 
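For context: with CONFIG_NR_CPUS >= 16K the tail encoding needs more bits, so _Q_PENDING_BITS drops from 8 to 1 and clear_pending() can no longer be a byte-sized store; it becomes an atomic AND-NOT on the whole lock word, which is the variant the native and PV code now share. A minimal userspace C11 rendering of that variant is shown below; clear_pending() here is only an analogue of the kernel helper, and PENDING_VAL of _Q_PENDING_VAL:

    #include <stdatomic.h>
    #include <stdint.h>

    #define PENDING_VAL (1U << 8)    /* plays the role of _Q_PENDING_VAL */

    /*
     * With a single pending bit there is no pending byte to store to,
     * so clearing it means atomically AND-ing it out of the lock word.
     */
    static void clear_pending(_Atomic uint32_t *lockval)
    {
        atomic_fetch_and_explicit(lockval, ~PENDING_VAL, memory_order_relaxed);
    }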
Reported-by: Waiman Long Signed-off-by: Will Deacon Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: boqun.feng@gmail.com Cc: linux-arm-kernel@lists.infradead.org Cc: paulmck@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/20180427101619.GB21705@arm.com Signed-off-by: Ingo Molnar --- kernel/locking/qspinlock_paravirt.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 25730b2ac022..5a0cf5f9008c 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -130,11 +130,6 @@ static __always_inline void set_pending(struct qspinlock *lock) atomic_or(_Q_PENDING_VAL, &lock->val); } -static __always_inline void clear_pending(struct qspinlock *lock) -{ - atomic_andnot(_Q_PENDING_VAL, &lock->val); -} - static __always_inline int trylock_clear_pending(struct qspinlock *lock) { int val = atomic_read(&lock->val); -- cgit v1.2.3-59-g8ed1b From 02acc80d19edb0d5684c997b2004ad19f9f5236e Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 23 Apr 2018 18:10:23 +0200 Subject: delayacct: Use raw_spinlocks try_to_wake_up() might invoke delayacct_blkio_end() while holding the pi_lock (which is a raw_spinlock_t). delayacct_blkio_end() acquires task_delay_info.lock which is a spinlock_t. This causes a might sleep splat on -RT where non raw spinlocks are converted to 'sleeping' spinlocks. task_delay_info.lock is only held for a short amount of time so it's not a problem latency wise to make convert it to a raw spinlock. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Cc: Balbir Singh Link: https://lkml.kernel.org/r/20180423161024.6710-1-bigeasy@linutronix.de --- include/linux/delayacct.h | 2 +- kernel/delayacct.c | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 5e335b6203f4..e6c0448ebcc7 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -29,7 +29,7 @@ #ifdef CONFIG_TASK_DELAY_ACCT struct task_delay_info { - spinlock_t lock; + raw_spinlock_t lock; unsigned int flags; /* Private per-task flags */ /* For each stat XXX, add following, aligned appropriately diff --git a/kernel/delayacct.c b/kernel/delayacct.c index e2764d767f18..ca8ac2824f0b 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -44,23 +44,24 @@ void __delayacct_tsk_init(struct task_struct *tsk) { tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); if (tsk->delays) - spin_lock_init(&tsk->delays->lock); + raw_spin_lock_init(&tsk->delays->lock); } /* * Finish delay accounting for a statistic using its timestamps (@start), * accumalator (@total) and @count */ -static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count) +static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, + u32 *count) { s64 ns = ktime_get_ns() - *start; unsigned long flags; if (ns > 0) { - spin_lock_irqsave(lock, flags); + raw_spin_lock_irqsave(lock, flags); *total += ns; (*count)++; - spin_unlock_irqrestore(lock, flags); + raw_spin_unlock_irqrestore(lock, flags); } } @@ -127,7 +128,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ - spin_lock_irqsave(&tsk->delays->lock, flags); + raw_spin_lock_irqsave(&tsk->delays->lock, flags); tmp = d->blkio_delay_total + tsk->delays->blkio_delay; d->blkio_delay_total = (tmp < 
d->blkio_delay_total) ? 0 : tmp; tmp = d->swapin_delay_total + tsk->delays->swapin_delay; @@ -137,7 +138,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->blkio_count += tsk->delays->blkio_count; d->swapin_count += tsk->delays->swapin_count; d->freepages_count += tsk->delays->freepages_count; - spin_unlock_irqrestore(&tsk->delays->lock, flags); + raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); return 0; } @@ -147,10 +148,10 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk) __u64 ret; unsigned long flags; - spin_lock_irqsave(&tsk->delays->lock, flags); + raw_spin_lock_irqsave(&tsk->delays->lock, flags); ret = nsec_to_clock_t(tsk->delays->blkio_delay + tsk->delays->swapin_delay); - spin_unlock_irqrestore(&tsk->delays->lock, flags); + raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); return ret; } -- cgit v1.2.3-59-g8ed1b From de5b55c1d4e30740009864eb35ce4ed856aac01d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 23 Apr 2018 21:16:35 +0200 Subject: stop_machine: Use raw spinlocks Use raw-locks in stop_machine() to allow locking in irq-off and preempt-disabled regions on -RT. This also documents the possible locking context in general. [bigeasy: update patch description.] Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Link: https://lkml.kernel.org/r/20180423191635.6014-1-bigeasy@linutronix.de --- kernel/stop_machine.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index b7591261652d..c25ba18274fb 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -36,7 +36,7 @@ struct cpu_stop_done { struct cpu_stopper { struct task_struct *thread; - spinlock_t lock; + raw_spinlock_t lock; bool enabled; /* is this stopper enabled? 
*/ struct list_head works; /* list of pending works */ @@ -78,13 +78,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) unsigned long flags; bool enabled; - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); enabled = stopper->enabled; if (enabled) __cpu_stop_queue_work(stopper, work); else if (work->done) cpu_stop_signal_done(work->done); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); return enabled; } @@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); int err; retry: - spin_lock_irq(&stopper1->lock); - spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock_irq(&stopper1->lock); + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); err = -ENOENT; if (!stopper1->enabled || !stopper2->enabled) @@ -255,8 +255,8 @@ retry: __cpu_stop_queue_work(stopper1, work1); __cpu_stop_queue_work(stopper2, work2); unlock: - spin_unlock(&stopper2->lock); - spin_unlock_irq(&stopper1->lock); + raw_spin_unlock(&stopper2->lock); + raw_spin_unlock_irq(&stopper1->lock); if (unlikely(err == -EDEADLK)) { while (stop_cpus_in_progress) @@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned int cpu) unsigned long flags; int run; - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); run = !list_empty(&stopper->works); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); return run; } @@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned int cpu) repeat: work = NULL; - spin_lock_irq(&stopper->lock); + raw_spin_lock_irq(&stopper->lock); if (!list_empty(&stopper->works)) { work = list_first_entry(&stopper->works, struct cpu_stop_work, list); list_del_init(&work->list); } - spin_unlock_irq(&stopper->lock); + raw_spin_unlock_irq(&stopper->lock); if (work) { cpu_stop_fn_t fn = work->fn; @@ -541,7 +541,7 @@ static int __init cpu_stop_init(void) for_each_possible_cpu(cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - spin_lock_init(&stopper->lock); + raw_spin_lock_init(&stopper->lock); INIT_LIST_HEAD(&stopper->works); } -- cgit v1.2.3-59-g8ed1b From c427f69564e2a844c5fcf2804042609342513da0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 5 Apr 2018 11:05:35 +0200 Subject: locking/mutex: Optimize __mutex_trylock_fast() Use try_cmpxchg to avoid the pointless TEST instruction.. And add the (missing) atomic_long_try_cmpxchg*() wrappery. 
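The fast path itself boils down to a single acquire compare-and-swap of the owner word from 0 to the current task pointer; with a try_cmpxchg()-style primitive the success result comes straight out of the CAS, so no separate test of the returned value is needed. A minimal userspace C11 sketch of that shape (mutex_trylock_fast() and the plain uintptr_t owner word are simplifications, not the kernel types):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /*
     * The mutex is free iff the owner word is 0, so try once to install
     * our task pointer with an acquire CAS; the boolean result of the
     * CAS is the trylock result.
     */
    static bool mutex_trylock_fast(_Atomic uintptr_t *owner, uintptr_t curr)
    {
        uintptr_t zero = 0;

        return atomic_compare_exchange_strong_explicit(owner, &zero, curr,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }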
On x86_64 this gives: 0000000000000710 : 0000000000000710 : 710: 65 48 8b 14 25 00 00 mov %gs:0x0,%rdx 710: 65 48 8b 14 25 00 00 mov %gs:0x0,%rdx 717: 00 00 717: 00 00 715: R_X86_64_32S current_task 715: R_X86_64_32S current_task 719: 31 c0 xor %eax,%eax 719: 31 c0 xor %eax,%eax 71b: f0 48 0f b1 17 lock cmpxchg %rdx,(%rdi) 71b: f0 48 0f b1 17 lock cmpxchg %rdx,(%rdi) 720: 48 85 c0 test %rax,%rax 720: 75 02 jne 724 723: 75 02 jne 727 722: f3 c3 repz retq 725: f3 c3 repz retq 724: eb da jmp 700 <__mutex_lock_slowpath> 727: eb d7 jmp 700 <__mutex_lock_slowpath> 726: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1) 729: 0f 1f 80 00 00 00 00 nopl 0x0(%rax) 72d: 00 00 00 On ARM64 this gives: 000000000000638 : 0000000000000638 : 638: d5384101 mrs x1, sp_el0 638: d5384101 mrs x1, sp_el0 63c: d2800002 mov x2, #0x0 63c: d2800002 mov x2, #0x0 640: f9800011 prfm pstl1strm, [x0] 640: f9800011 prfm pstl1strm, [x0] 644: c85ffc03 ldaxr x3, [x0] 644: c85ffc03 ldaxr x3, [x0] 648: ca020064 eor x4, x3, x2 648: ca020064 eor x4, x3, x2 64c: b5000064 cbnz x4, 658 64c: b5000064 cbnz x4, 658 650: c8047c01 stxr w4, x1, [x0] 650: c8047c01 stxr w4, x1, [x0] 654: 35ffff84 cbnz w4, 644 654: 35ffff84 cbnz w4, 644 658: b40000c3 cbz x3, 670 658: b5000043 cbnz x3, 660 65c: a9bf7bfd stp x29, x30, [sp,#-16]! 65c: d65f03c0 ret 660: 910003fd mov x29, sp 660: a9bf7bfd stp x29, x30, [sp,#-16]! 664: 97ffffef bl 620 <__mutex_lock_slowpath> 664: 910003fd mov x29, sp 668: a8c17bfd ldp x29, x30, [sp],#16 668: 97ffffee bl 620 <__mutex_lock_slowpath> 66c: d65f03c0 ret 66c: a8c17bfd ldp x29, x30, [sp],#16 670: d65f03c0 ret 670: d65f03c0 ret Reported-by: Matthew Wilcox Acked-by: Will Deacon Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/asm-generic/atomic-long.h | 17 +++++++++++++++++ kernel/locking/mutex.c | 3 ++- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 5b2b0b5ea06d..87d14476edc2 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -25,6 +25,7 @@ typedef atomic64_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) #define ATOMIC_LONG_PFX(x) atomic64 ## x +#define ATOMIC_LONG_TYPE s64 #else @@ -32,6 +33,7 @@ typedef atomic_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) #define ATOMIC_LONG_PFX(x) atomic ## x +#define ATOMIC_LONG_TYPE int #endif @@ -90,6 +92,21 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release) #define atomic_long_cmpxchg(l, old, new) \ (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new))) + +#define atomic_long_try_cmpxchg_relaxed(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) +#define atomic_long_try_cmpxchg_acquire(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) +#define atomic_long_try_cmpxchg_release(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) +#define atomic_long_try_cmpxchg(l, old, new) \ + (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \ + (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new))) + + #define atomic_long_xchg_relaxed(v, new) \ (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new))) #define atomic_long_xchg_acquire(v, new) \ diff --git 
a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 2048359f33d2..f44f658ae629 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -139,8 +139,9 @@ static inline bool __mutex_trylock(struct mutex *lock) static __always_inline bool __mutex_trylock_fast(struct mutex *lock) { unsigned long curr = (unsigned long)current; + unsigned long zero = 0UL; - if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr)) + if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) return true; return false; -- cgit v1.2.3-59-g8ed1b From 0f736a52e4be86476eec1d5adbcbd9c2809ac4b4 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Fri, 6 Apr 2018 19:41:18 +0900 Subject: locking/lockdep: Use for_each_process_thread() for debug_show_all_locks() debug_show_all_locks() tries to grab the tasklist_lock for two seconds, but calling while_each_thread() without tasklist_lock held is not safe. See the following commit for more information: 4449a51a7c281602 ("vm_is_stack: use for_each_thread() rather then buggy while_each_thread()") Change debug_show_all_locks() from "do_each_thread()/while_each_thread() with possibility of missing tasklist_lock" to "for_each_process_thread() with RCU", and add a call to touch_all_softlockup_watchdogs() like show_state_filter() does. Signed-off-by: Tetsuo Handa Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1523011279-8206-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 43 ++++++++----------------------------------- 1 file changed, 8 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 023386338269..94f4d21ff66d 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -4451,8 +4451,6 @@ EXPORT_SYMBOL_GPL(debug_check_no_locks_held); void debug_show_all_locks(void) { struct task_struct *g, *p; - int count = 10; - int unlock = 1; if (unlikely(!debug_locks)) { pr_warn("INFO: lockdep is turned off.\n"); @@ -4460,30 +4458,8 @@ void debug_show_all_locks(void) } pr_warn("\nShowing all locks held in the system:\n"); - /* - * Here we try to get the tasklist_lock as hard as possible, - * if not successful after 2 seconds we ignore it (but keep - * trying). This is to enable a debug printout even if a - * tasklist_lock-holding task deadlocks or crashes. - */ -retry: - if (!read_trylock(&tasklist_lock)) { - if (count == 10) - pr_warn("hm, tasklist_lock locked, retrying... 
"); - if (count) { - count--; - pr_cont(" #%d", 10-count); - mdelay(200); - goto retry; - } - pr_cont(" ignoring it.\n"); - unlock = 0; - } else { - if (count != 10) - pr_cont(" locked it.\n"); - } - - do_each_thread(g, p) { + rcu_read_lock(); + for_each_process_thread(g, p) { /* * It's not reliable to print a task's held locks * if it's not sleeping (or if it's not the current @@ -4491,19 +4467,16 @@ retry: */ if (p->state == TASK_RUNNING && p != current) continue; - if (p->lockdep_depth) - lockdep_print_held_locks(p); - if (!unlock) - if (read_trylock(&tasklist_lock)) - unlock = 1; + if (!p->lockdep_depth) + continue; + lockdep_print_held_locks(p); touch_nmi_watchdog(); - } while_each_thread(g, p); + touch_all_softlockup_watchdogs(); + } + rcu_read_unlock(); pr_warn("\n"); pr_warn("=============================================\n\n"); - - if (unlock) - read_unlock(&tasklist_lock); } EXPORT_SYMBOL_GPL(debug_show_all_locks); #endif -- cgit v1.2.3-59-g8ed1b From 8cc05c71ba5f793690bb72aeb404dce65b5d4b52 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Fri, 6 Apr 2018 19:41:19 +0900 Subject: locking/lockdep: Move sanity check to inside lockdep_print_held_locks() Calling lockdep_print_held_locks() on a running thread is considered unsafe. Since all callers should follow that rule and the sanity check is not heavy, this patch moves the sanity check to inside lockdep_print_held_locks(). As a side effect of this patch, the number of locks held by running threads will be printed as well. This change will be preferable when we want to know which threads might be relevant to a problem but are unable to print any clues because that thread is running. Signed-off-by: Tetsuo Handa Signed-off-by: Peter Zijlstra (Intel) Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1523011279-8206-2-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 94f4d21ff66d..edcac5de7ebc 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -561,20 +561,24 @@ static void print_lock(struct held_lock *hlock) printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip); } -static void lockdep_print_held_locks(struct task_struct *curr) +static void lockdep_print_held_locks(struct task_struct *p) { - int i, depth = curr->lockdep_depth; + int i, depth = READ_ONCE(p->lockdep_depth); - if (!depth) { - printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); + if (!depth) + printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p)); + else + printk("%d lock%s held by %s/%d:\n", depth, + depth > 1 ? "s" : "", p->comm, task_pid_nr(p)); + /* + * It's not reliable to print a task's held locks if it's not sleeping + * and it's not the current task. + */ + if (p->state == TASK_RUNNING && p != current) return; - } - printk("%d lock%s held by %s/%d:\n", - depth, depth > 1 ? 
"s" : "", curr->comm, task_pid_nr(curr)); - for (i = 0; i < depth; i++) { printk(" #%d: ", i); - print_lock(curr->held_locks + i); + print_lock(p->held_locks + i); } } @@ -4460,13 +4464,6 @@ void debug_show_all_locks(void) rcu_read_lock(); for_each_process_thread(g, p) { - /* - * It's not reliable to print a task's held locks - * if it's not sleeping (or if it's not the current - * task): - */ - if (p->state == TASK_RUNNING && p != current) - continue; if (!p->lockdep_depth) continue; lockdep_print_held_locks(p); -- cgit v1.2.3-59-g8ed1b From 1b22fc609cecd1b16c4a015e1a6b3c9717484e3a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 18 May 2018 18:55:35 +0200 Subject: locking/rwsem: Simplify the is-owner-spinnable checks Add the trivial owner_on_cpu() helper for rwsem_can_spin_on_owner() and rwsem_spin_on_owner(), it also allows to make rwsem_can_spin_on_owner() a bit more clear. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Acked-by: Waiman Long Cc: Amir Goldstein Cc: Davidlohr Bueso Cc: Jan Kara Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Theodore Y. Ts'o Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180518165534.GA22348@redhat.com Signed-off-by: Ingo Molnar --- kernel/locking/rwsem-xadd.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index a90336779375..3064c50e181e 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -347,6 +347,15 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) } } +static inline bool owner_on_cpu(struct task_struct *owner) +{ + /* + * As lock holder preemption issue, we both skip spinning if + * task is not on cpu or its cpu is preempted + */ + return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); +} + static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) { struct task_struct *owner; @@ -359,17 +368,10 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) rcu_read_lock(); owner = READ_ONCE(sem->owner); - if (!owner || !is_rwsem_owner_spinnable(owner)) { - ret = !owner; /* !owner is spinnable */ - goto done; + if (owner) { + ret = is_rwsem_owner_spinnable(owner) && + owner_on_cpu(owner); } - - /* - * As lock holder preemption issue, we both skip spinning if task is not - * on cpu or its cpu is preempted - */ - ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); -done: rcu_read_unlock(); return ret; } @@ -398,8 +400,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) * abort spinning when need_resched or owner is not running or * owner's cpu is preempted. */ - if (!owner->on_cpu || need_resched() || - vcpu_is_preempted(task_cpu(owner))) { + if (need_resched() || !owner_on_cpu(owner)) { rcu_read_unlock(); return false; } -- cgit v1.2.3-59-g8ed1b