From 1bce11126d57dde90a02ecf9bfe98175ab4e729e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 16 Mar 2022 15:53:54 -0700
Subject: asm-generic: ticket-lock: New generic ticket-based spinlock

This is a simple, fair spinlock.  Specifically, it doesn't have all the
subtle memory model dependencies that qspinlock has, which makes it more
suitable for simple systems as it is more likely to be correct.  It is
implemented entirely in terms of standard atomics and thus works fine
without any arch-specific code.

This replaces the existing asm-generic/spinlock.h, which just errored
out on SMP systems.

Signed-off-by: Peter Zijlstra (Intel)
Tested-by: Heiko Stuebner
Reviewed-by: Guo Ren
Reviewed-by: Arnd Bergmann
Signed-off-by: Palmer Dabbelt
---
 include/asm-generic/spinlock.h       | 94 +++++++++++++++++++++++++++++++++---
 include/asm-generic/spinlock_types.h | 17 +++++++
 2 files changed, 104 insertions(+), 7 deletions(-)
 create mode 100644 include/asm-generic/spinlock_types.h

diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index adaf6acab172..fdfebcb050f4 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -1,12 +1,92 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_SPINLOCK_H
-#define __ASM_GENERIC_SPINLOCK_H
+
 /*
- * You need to implement asm/spinlock.h for SMP support. The generic
- * version does not handle SMP.
+ * 'Generic' ticket-lock implementation.
+ *
+ * It relies on atomic_fetch_add() having well-defined forward progress
+ * guarantees under contention. If your architecture cannot provide this, stick
+ * to a test-and-set lock.
+ *
+ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
+ * sub-word of the value. This is generally true for anything LL/SC although
+ * you'd be hard pressed to find anything useful in architecture specifications
+ * about this. If your architecture cannot do this you might be better off with
+ * a test-and-set.
+ *
+ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
+ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
+ * a full fence after the spin to upgrade the otherwise-RCpc
+ * atomic_cond_read_acquire().
+ *
+ * The implementation uses smp_cond_load_acquire() to spin, so if the
+ * architecture has WFE-like instructions to sleep instead of poll for word
+ * modifications be sure to implement that (see ARM64 for example).
+ *
  */
-#ifdef CONFIG_SMP
-#error need an architecture specific asm/spinlock.h
-#endif
+
+#ifndef __ASM_GENERIC_SPINLOCK_H
+#define __ASM_GENERIC_SPINLOCK_H
+
+#include <linux/atomic.h>
+#include <asm-generic/spinlock_types.h>
+
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	u32 val = atomic_fetch_add(1<<16, lock);
+	u16 ticket = val >> 16;
+
+	if (ticket == (u16)val)
+		return;
+
+	/*
+	 * atomic_cond_read_acquire() is RCpc, but rather than defining a
+	 * custom cond_read_rcsc() here we just emit a full fence. We only
+	 * need the prior reads before subsequent writes ordering from
+	 * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
+	 * have no outstanding writes due to the atomic_fetch_add() the extra
+	 * orderings are free.
+	 */
+	atomic_cond_read_acquire(lock, ticket == (u16)VAL);
+	smp_mb();
+}
+
+static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
+{
+	u32 old = atomic_read(lock);
+
+	if ((old >> 16) != (old & 0xffff))
+		return false;
+
+	return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
+}
+
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+	u32 val = atomic_read(lock);
+
+	smp_store_release(ptr, (u16)val + 1);
+}
+
+static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	u32 val = atomic_read(lock);
+
+	return ((val >> 16) != (val & 0xffff));
+}
+
+static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	u32 val = atomic_read(lock);
+
+	return (s16)((val >> 16) - (val & 0xffff)) > 1;
+}
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return !arch_spin_is_locked(&lock);
+}
+
+#include <asm/qrwlock.h>
 #endif /* __ASM_GENERIC_SPINLOCK_H */
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
new file mode 100644
index 000000000000..8962bb730945
--- /dev/null
+++ b/include/asm-generic/spinlock_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
+#define __ASM_GENERIC_SPINLOCK_TYPES_H
+
+#include <linux/types.h>
+typedef atomic_t arch_spinlock_t;
+
+/*
+ * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the
+ * include.
+ */
+#include <asm/qrwlock_types.h>
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	ATOMIC_INIT(0)
+
+#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
-- 
cgit v1.2.3-59-g8ed1b
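The ticket encoding above packs both counters into one 32-bit word: the high
halfword is the next ticket to hand out, the low halfword is the ticket now
being served. As a rough illustration, here is a userspace sketch of the same
scheme using C11 <stdatomic.h>. All names are invented for the sketch and this
is not the kernel code; in particular, portable C11 cannot issue the kernel's
16-bit release store to half of a 32-bit atomic object, so the unlock path
below falls back to a compare-exchange loop, which is exactly the mixed-size
subtlety the header comment warns about.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	_Atomic uint32_t val;	/* high halfword: next ticket, low: now serving */
} ticket_lock_t;

#define TICKET_LOCK_INIT { 0 }

static void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket with a single RMW, so every contender makes progress. */
	uint32_t old = atomic_fetch_add_explicit(&lock->val, 1u << 16,
						 memory_order_acquire);
	uint16_t ticket = (uint16_t)(old >> 16);

	if (ticket == (uint16_t)old)
		return;		/* uncontended: it is already our turn */

	/*
	 * Spin until "now serving" reaches our ticket. The kernel version
	 * additionally issues a full fence here for RCsc lock ordering;
	 * acquire is enough for plain mutual exclusion.
	 */
	while ((uint16_t)atomic_load_explicit(&lock->val,
					      memory_order_acquire) != ticket)
		;
}

static bool ticket_trylock(ticket_lock_t *lock)
{
	uint32_t old = atomic_load_explicit(&lock->val, memory_order_relaxed);

	if ((old >> 16) != (old & 0xffff))
		return false;	/* held, or waiters already queued */

	return atomic_compare_exchange_strong_explicit(&lock->val, &old,
						       old + (1u << 16),
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/*
	 * The kernel does a 16-bit smp_store_release() to the "now serving"
	 * halfword. Portable C11 cannot store to half of an atomic object,
	 * so bump that halfword with a CAS loop instead; only waiters taking
	 * new tickets can race with the lock holder here.
	 */
	uint32_t old = atomic_load_explicit(&lock->val, memory_order_relaxed);
	uint32_t next;

	do {
		next = (old & 0xffff0000u) | (uint16_t)(old + 1);
	} while (!atomic_compare_exchange_weak_explicit(&lock->val, &old, next,
							memory_order_release,
							memory_order_relaxed));
}

Threads contending on ticket_lock() are granted the lock strictly in the order
their fetch_add executed, which is the fairness property the commit message
relies on.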
From a8ad07e5240c9e78633270be2fa2356b7e0f0af5 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 16 Mar 2022 15:48:29 -0700
Subject: asm-generic: qspinlock: Indicate the use of mixed-size atomics

The qspinlock implementation depends on having well-behaved mixed-size
atomics.  This is true on the more widely-used platforms, but these
requirements are somewhat subtle and may not be satisfied by all the
platforms that qspinlock is used on.

Document these requirements, so ports that use qspinlock can more easily
determine whether they meet them.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Waiman Long
Reviewed-by: Arnd Bergmann
Signed-off-by: Palmer Dabbelt
---
 include/asm-generic/qspinlock.h | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index d74b13825501..995513fa2690 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -2,6 +2,35 @@
 /*
  * Queued spinlock
  *
+ * A 'generic' spinlock implementation that is based on MCS locks. For an
+ * architecture that's looking for a 'generic' spinlock, please first consider
+ * ticket-lock.h and only come looking here when you've considered all the
+ * constraints below and can show your hardware does actually perform better
+ * with qspinlock.
+ *
+ * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
+ * weaker than RCtso if you're Power), where regular code only expects atomic_t
+ * to be RCpc.
+ *
+ * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
+ * of atomic operations to behave well together, please audit them carefully to
+ * ensure they all have forward progress. Many atomic operations may default to
+ * cmpxchg() loops which will not have good forward progress properties on
+ * LL/SC architectures.
+ *
+ * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
+ * do. Carefully read the patches that introduced
+ * queued_fetch_set_pending_acquire().
+ *
+ * qspinlock also heavily relies on mixed-size atomic operations; specifically
+ * it requires architectures to have xchg16, something which many LL/SC
+ * architectures need to implement as a 32-bit and+or in order to satisfy the
+ * forward progress guarantees mentioned above.
+ *
+ * Further reading on mixed-size atomics that might be relevant:
+ *
+ *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
+ *
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
  * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
-- 
cgit v1.2.3-59-g8ed1b
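The "32-bit and+or" remark in the comment above is easiest to see in code. The
following hedged sketch (invented names, not the kernel implementation) shows
how a 16-bit exchange ends up being emulated on hardware that only has
word-sized compare-and-swap or LL/SC: mask out the target halfword, merge in
the new value, and retry until nobody else touched the word in between.

#include <stdatomic.h>
#include <stdint.h>

/*
 * Emulated 16-bit exchange on a machine that only has 32-bit atomics:
 * clear the target halfword (the "and"), merge in the new value
 * (the "or"), and retry until nobody else modified the word meanwhile.
 */
static uint16_t xchg16_emulated(_Atomic uint32_t *word, uint16_t newval,
				unsigned int shift)	/* 0 or 16 */
{
	uint32_t mask = 0xffffu << shift;
	uint32_t old = atomic_load_explicit(word, memory_order_relaxed);
	uint32_t next;

	do {
		next = (old & ~mask) | ((uint32_t)newval << shift);
	} while (!atomic_compare_exchange_weak_explicit(word, &old, next,
							memory_order_acq_rel,
							memory_order_relaxed));

	/*
	 * Unlike a native halfword xchg, this loop has no forward-progress
	 * guarantee: concurrent updates to the *other* halfword also make
	 * the CAS fail and force another retry.
	 */
	return (uint16_t)(old >> shift);
}

On an LL/SC machine the emulation looks the same with a load-reserved/
store-conditional pair in place of the CAS, and it shares the same weakness:
updates to the other halfword also force a retry, so the forward-progress
guarantee qspinlock expects from xchg16 is no longer obvious. That is the
audit the comment asks porters to do.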
From 493e2ba27635971565a991dc9f689553242890a4 Mon Sep 17 00:00:00 2001
From: Palmer Dabbelt
Date: Thu, 14 Apr 2022 09:34:29 -0700
Subject: asm-generic: qrwlock: Document the spinlock fairness requirements

I could only find the fairness requirements documented in the C code, so
this calls them out in a comment just to be a bit more explicit.

Reviewed-by: Arnd Bergmann
Signed-off-by: Palmer Dabbelt
---
 include/asm-generic/qrwlock.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7ae0ece07b4e..24ae09c1db9f 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -2,6 +2,10 @@
 /*
  * Queue read/write lock
  *
+ * These use generic atomic and locking routines, but depend on a fair spinlock
+ * implementation in order to be fair themselves. The implementation in
+ * asm-generic/spinlock.h meets these requirements.
+ *
  * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
  *
  * Authors: Waiman Long
-- 
cgit v1.2.3-59-g8ed1b
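To make that fairness dependency concrete, here is a deliberately simplified,
hypothetical model (invented names, not the qrwlock code, contended slow path
only) of how a queued read/write lock can inherit fairness from the spinlock
it queues on: every contended reader and writer first passes through a fair
ticket lock, so grants happen in arrival order.

#include <stdatomic.h>
#include <stdint.h>

/* A minimal fair (ticket) lock standing in for qrwlock's wait_lock. */
typedef struct {
	_Atomic uint16_t next;
	_Atomic uint16_t owner;
} fair_lock_t;

static void fair_lock(fair_lock_t *l)
{
	uint16_t t = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != t)
		;	/* wait for our ticket to come up */
}

static void fair_unlock(fair_lock_t *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

#define RW_WRITER	1u	/* bit 0: a writer holds the lock */
#define RW_READER	2u	/* bits 1..31: number of readers   */

typedef struct {
	_Atomic uint32_t cnts;
	fair_lock_t wait;
} toy_rwlock_t;

static void toy_read_lock(toy_rwlock_t *rw)
{
	fair_lock(&rw->wait);		/* queue in strict arrival order */
	atomic_fetch_add_explicit(&rw->cnts, RW_READER, memory_order_relaxed);
	while (atomic_load_explicit(&rw->cnts, memory_order_acquire) & RW_WRITER)
		;			/* let the writer ahead of us finish */
	fair_unlock(&rw->wait);
}

static void toy_read_unlock(toy_rwlock_t *rw)
{
	atomic_fetch_sub_explicit(&rw->cnts, RW_READER, memory_order_release);
}

static void toy_write_lock(toy_rwlock_t *rw)
{
	uint32_t expected = 0;

	fair_lock(&rw->wait);		/* queue in strict arrival order */
	/* wait for earlier readers to drain, then claim the writer bit */
	while (!atomic_compare_exchange_weak_explicit(&rw->cnts, &expected,
						      RW_WRITER,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;
	fair_unlock(&rw->wait);
}

static void toy_write_unlock(toy_rwlock_t *rw)
{
	atomic_fetch_sub_explicit(&rw->cnts, RW_WRITER, memory_order_release);
}

Swap an unfair test-and-set lock in for fair_lock_t and a steady stream of
readers can keep winning the wait lock and starve a writer (or vice versa),
which is precisely why the new comment insists on a fair spinlock underneath
qrwlock.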