author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-13 12:23:39 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-13 12:23:39 -0700
commit		de5d1b39ea0b38a9f4dfb08966042b7b91e2df30 (patch)
tree		3591bdac4fe6756b4e3dc68b2ed1c792c4104218 /include
parent		Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip (diff)
parent		locking/atomics: Rework ordering barriers (diff)
download	linux-dev-de5d1b39ea0b38a9f4dfb08966042b7b91e2df30.tar.xz
		linux-dev-de5d1b39ea0b38a9f4dfb08966042b7b91e2df30.zip
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
 "The locking, atomics and memory model brains delivered:

  - A larger update to the atomics code which reworks the ordering
    barriers, consolidates the atomic primitives, provides the new
    atomic64_fetch_add_unless() primitive and cleans up the include hell.

  - Simplify cmpxchg() instrumentation and add instrumentation for
    xchg() and cmpxchg_double().

  - Updates to the memory model and documentation"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
  locking/atomics: Rework ordering barriers
  locking/atomics: Instrument cmpxchg_double*()
  locking/atomics: Instrument xchg()
  locking/atomics: Simplify cmpxchg() instrumentation
  locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
  tools/memory-model: Rename litmus tests to comply to norm7
  tools/memory-model/Documentation: Fix typo, smb->smp
  sched/Documentation: Update wake_up() & co. memory-barrier guarantees
  locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
  sched/core: Use smp_mb() in wake_woken_function()
  tools/memory-model: Add informal LKMM documentation to MAINTAINERS
  locking/atomics/Documentation: Describe atomic_set() as a write operation
  tools/memory-model: Make scripts executable
  tools/memory-model: Remove ACCESS_ONCE() from model
  tools/memory-model: Remove ACCESS_ONCE() from recipes
  locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
  MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
  tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
  tools/memory-model: Add litmus test for full multicopy atomicity
  locking/refcount: Always allow checked forms
  ...
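
The atomic_fetch_add_unless() primitive named above returns the old value of
the counter, so a caller can tell whether the add actually happened. A minimal
usage sketch (get_ref() and the refcount-style use are illustrative, not taken
from this merge):

    /* Take a reference unless the count has already dropped to zero. */
    static bool get_ref(atomic_t *refs)
    {
    	/* adds 1 unless *refs == 0; returns the old value */
    	return atomic_fetch_add_unless(refs, 1, 0) != 0;
    }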
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/atomic-instrumented.h	197
-rw-r--r--	include/asm-generic/atomic.h			 33
-rw-r--r--	include/asm-generic/atomic64.h			 15
-rw-r--r--	include/asm-generic/bitops/atomic.h		188
-rw-r--r--	include/asm-generic/bitops/lock.h		 68
-rw-r--r--	include/linux/atomic.h				453
-rw-r--r--	include/linux/bitops.h				 22
-rw-r--r--	include/linux/bits.h				 26
-rw-r--r--	include/linux/refcount.h			 34
-rw-r--r--	include/linux/sched.h				  4
-rw-r--r--	include/linux/spinlock.h			 53
11 files changed, 619 insertions, 474 deletions
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index ec07f23678ea..0d4b1d3dbc1e 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
}
#endif
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#ifdef arch_atomic_fetch_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kasan_check_write(v, sizeof(*v));
- return __arch_atomic_add_unless(v, a, u);
+ return arch_atomic_fetch_add_unless(v, a, u);
}
+#endif
-
-static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
+ return arch_atomic64_fetch_add_unless(v, a, u);
}
+#endif
+#ifdef arch_atomic_inc
+#define atomic_inc atomic_inc
static __always_inline void atomic_inc(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_inc(v);
}
+#endif
+#ifdef arch_atomic64_inc
+#define atomic64_inc atomic64_inc
static __always_inline void atomic64_inc(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
+#endif
+#ifdef arch_atomic_dec
+#define atomic_dec atomic_dec
static __always_inline void atomic_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_dec(v);
}
+#endif
+#ifdef arch_atomic64_dec
+#define atomic64_dec atomic64_dec
static __always_inline void atomic64_dec(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
+#endif
static __always_inline void atomic_add(int i, atomic_t *v)
{
@@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
arch_atomic64_xor(i, v);
}
+#ifdef arch_atomic_inc_return
+#define atomic_inc_return atomic_inc_return
static __always_inline int atomic_inc_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
+#endif
+#ifdef arch_atomic64_inc_return
+#define atomic64_inc_return atomic64_inc_return
static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
+#endif
+#ifdef arch_atomic_dec_return
+#define atomic_dec_return atomic_dec_return
static __always_inline int atomic_dec_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
+#endif
+#ifdef arch_atomic64_dec_return
+#define atomic64_dec_return atomic64_dec_return
static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
+#endif
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
+#endif
+#ifdef arch_atomic64_dec_if_positive
+#define atomic64_dec_if_positive atomic64_dec_if_positive
static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
+#endif
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
+#endif
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
+#endif
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
@@ -325,152 +372,96 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
return arch_atomic64_fetch_xor(i, v);
}
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
+#endif
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
+#endif
-static __always_inline unsigned long
-cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
+#define xchg(ptr, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, (new)); \
+})
#define cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
- (unsigned long)(new), sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define sync_cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define cmpxchg_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64(ptr, old, new);
-}
-
#define cmpxchg64(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64_local(ptr, old, new);
-}
-
#define cmpxchg64_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
})
-/*
- * Originally we had the following code here:
- * __typeof__(p1) ____p1 = (p1);
- * kasan_check_write(____p1, 2 * sizeof(*____p1));
- * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
- * But it leads to compilation failures (see gcc issue 72873).
- * So for now it's left non-instrumented.
- * There are few callers of cmpxchg_double(), so it's not critical.
- */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
- arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
-({ \
- arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
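
Every wrapper in this file follows the same three-part idiom: it is compiled
only when the architecture supplies the arch_*() operation, it defines the
plain name to itself so that <linux/atomic.h> can later detect the operation
with #ifndef and avoid installing a generic fallback, and it reports the
access to KASAN before delegating. A sketch of the shape, with
arch_atomic_foo() standing in as a placeholder for any optional operation:

    #ifdef arch_atomic_foo
    #define atomic_foo atomic_foo			/* mark the op as present */
    static __always_inline void atomic_foo(atomic_t *v)
    {
    	kasan_check_write(v, sizeof(*v));	/* instrument the access */
    	arch_atomic_foo(v);			/* real arch implementation */
    }
    #endif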
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index abe6dd9ca2a8..13324aa828eb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
#include <linux/irqflags.h>
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
@@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v)
atomic_sub_return(i, v);
}
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-#endif
-
#endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 8d28eb010d0d..97b28b7f1f29 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
*/
#ifndef _ASM_GENERIC_ATOMIC64_H
#define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
typedef struct {
long long counter;
@@ -50,18 +51,10 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP
extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 04deffaf5f7d..dd90c9792909 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,189 +2,67 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
-#include <asm/types.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_SMP
-#include <asm/spinlock.h>
-#include <asm/cache.h> /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- local_irq_save(f); \
- arch_spin_lock(s); \
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- arch_spin_unlock(s); \
- local_irq_restore(f); \
-} while(0)
-
-
-#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
-#endif
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
/*
- * NMI events can occur at any time, including when interrupts have been
- * disabled by *_irqsave(). So you can get NMI events occurring while a
- * *_bit function is holding a spin lock. If the NMI handler also wants
- * to do bit manipulation (and they do) then you can get a deadlock
- * between the original caller of *_bit() and the NMI handler.
- *
- * by Keith Owens
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
*/
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p |= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p &= ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p ^= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old | mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
- return (old & mask) != 0;
+ old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old & ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (!(READ_ONCE(*p) & mask))
+ return 0;
- return (old & mask) != 0;
+ old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old ^ mask;
- _atomic_spin_unlock_irqrestore(p, flags);
- return (old & mask) != 0;
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
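
The rewritten bitops drop the hashed-spinlock scheme entirely: a bit number is
split into a word index and an in-word mask, and the containing word is
updated with one atomic-long fetch op. A worked sketch of the mapping, using
BIT_WORD()/BIT_MASK() from the new <linux/bits.h> (map is a hypothetical
unsigned long bitmap; the numbers assume BITS_PER_LONG == 64):

    /*
     * set_bit(70, map) on a 64-bit kernel:
     *   BIT_WORD(70) == 70 / 64 == 1            -> second word of the bitmap
     *   BIT_MASK(70) == 1UL << (70 % 64) == 1UL << 6
     * so the call atomically ORs bit 6 into map[1]:
     */
    atomic_long_or(BIT_MASK(70), (atomic_long_t *)(map + BIT_WORD(70)));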
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 67ab280ad134..3ae021368f48 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -2,6 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
@@ -11,7 +15,20 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -20,11 +37,11 @@
*
* This operation is atomic and provides release barrier semantics.
*/
-#define clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
@@ -37,11 +54,38 @@ do { \
*
* See for example x86's implementation.
*/
-#define __clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ unsigned long old;
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+ p += BIT_WORD(nr);
+ old = READ_ONCE(*p);
+ old &= ~BIT_MASK(nr);
+ atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters,
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
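
Because test_and_set_bit_lock() has acquire semantics and clear_bit_unlock()
has release semantics, the pair is enough to build a simple bit spinlock. A
hedged sketch (busy_lock()/busy_unlock() and BUSY_BIT are made-up names for
illustration, not kernel API):

    #define BUSY_BIT	0

    static void busy_lock(volatile unsigned long *word)
    {
    	/* acquire: accesses in the critical section cannot move before this */
    	while (test_and_set_bit_lock(BUSY_BIT, word))
    		cpu_relax();
    }

    static void busy_unlock(volatile unsigned long *word)
    {
    	/* release: accesses in the critical section cannot move after this */
    	clear_bit_unlock(BUSY_BIT, word);
    }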
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 01ce3997cb42..1e8e88bdaf09 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -36,40 +38,46 @@
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
*/
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_acquire_fence(); \
__ret; \
})
-#endif
-#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
- smp_mb__before_atomic(); \
+ __atomic_release_fence(); \
op##_relaxed(args); \
})
-#endif
-#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
- smp_mb__before_atomic(); \
+ __atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_post_full_fence(); \
__ret; \
})
-#endif
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
@@ -95,11 +103,23 @@
#endif
#endif /* atomic_add_return_relaxed */
+#ifndef atomic_inc
+#define atomic_inc(v) atomic_add(1, (v))
+#endif
+
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+#else /* atomic_inc_return */
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
+#endif /* atomic_inc_return */
#else /* atomic_inc_return_relaxed */
@@ -143,11 +163,23 @@
#endif
#endif /* atomic_sub_return_relaxed */
+#ifndef atomic_dec
+#define atomic_dec(v) atomic_sub(1, (v))
+#endif
+
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
+#else /* atomic_dec_return */
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
+#endif /* atomic_dec_return */
#else /* atomic_dec_return_relaxed */
@@ -328,12 +360,22 @@
#endif
#endif /* atomic_fetch_and_relaxed */
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_andnot
+#define atomic_andnot(i, v) atomic_and(~(int)(i), (v))
+#endif
+
#ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
+
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v))
+#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v))
+#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v))
+#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v))
+#else /* atomic_fetch_andnot */
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
#else /* atomic_fetch_andnot_relaxed */
@@ -352,7 +394,6 @@
__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
/* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_relaxed
@@ -520,112 +561,140 @@
#endif /* xchg_relaxed */
/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#endif
+
+/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
*/
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
- return __atomic_add_unless(v, a, u) != u;
+ return atomic_fetch_add_unless(v, a, u) != u;
}
/**
* atomic_inc_not_zero - increment unless the number is zero
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
*/
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_inc_and_test
+static inline bool atomic_inc_and_test(atomic_t *v)
{
- return atomic_fetch_and_relaxed(~i, v);
+ return atomic_inc_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic_dec_and_test
+static inline bool atomic_dec_and_test(atomic_t *v)
{
- return atomic_fetch_and_acquire(~i, v);
+ return atomic_dec_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_sub_and_test
+static inline bool atomic_sub_and_test(int i, atomic_t *v)
{
- return atomic_fetch_and_release(~i, v);
+ return atomic_sub_return(i, v) == 0;
}
#endif
/**
- * atomic_inc_not_zero_hint - increment if not null
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
* @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
*
- * Returns: 0 if increment was not done, 1 otherwise.
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
*/
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+#ifndef atomic_add_negative
+static inline bool atomic_add_negative(int i, atomic_t *v)
{
- int val, c = hint;
-
- /* sanity test, should be removed by compiler if hint is a constant */
- if (!hint)
- return atomic_inc_not_zero(v);
-
- do {
- val = atomic_cmpxchg(v, c, c + 1);
- if (val == c)
- return 1;
- c = val;
- } while (c);
-
- return 0;
+ return atomic_add_return(i, v) < 0;
}
#endif
#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *v)
{
- int v, v1;
- for (v = 0; v >= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v + 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
}
#endif
#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *v)
{
- int v, v1;
- for (v = 0; v <= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v - 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
}
#endif
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
+ int dec, c = atomic_read(v);
+
+ do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
+ } while (!atomic_try_cmpxchg(v, &c, dec));
+
return dec;
}
#endif
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_add_return_relaxed */
+#ifndef atomic64_inc
+#define atomic64_inc(v) atomic64_add(1, (v))
+#endif
+
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#else /* atomic64_inc_return */
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
+#endif /* atomic64_inc_return */
#else /* atomic64_inc_return_relaxed */
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_sub_return_relaxed */
+#ifndef atomic64_dec
+#define atomic64_dec(v) atomic64_sub(1, (v))
+#endif
+
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#else /* atomic64_dec_return */
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
+#endif /* atomic64_dec_return */
#else /* atomic64_dec_return_relaxed */
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_fetch_and_relaxed */
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_andnot
+#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v))
+#endif
+
#ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v))
+#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v))
+#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v))
+#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v))
+#else /* atomic64_fetch_andnot */
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
#else /* atomic64_fetch_andnot_relaxed */
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
/* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- atomic64_and(~i, v);
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
}
+#endif
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- return atomic64_fetch_and(~i, v);
+ return atomic64_fetch_add_unless(v, a, u) != u;
}
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#endif
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_inc_and_test
+static inline bool atomic64_inc_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_relaxed(~i, v);
+ return atomic64_inc_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic64_dec_and_test
+static inline bool atomic64_dec_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_acquire(~i, v);
+ return atomic64_dec_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_sub_and_test
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
+#endif
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#ifndef atomic64_add_negative
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
{
- return atomic64_fetch_and_release(~i, v);
+ return atomic64_add_return(i, v) < 0;
+}
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool atomic64_inc_unless_negative(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool atomic64_dec_unless_positive(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#endif
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic64 variable, v, was not decremented.
+ */
+#ifndef atomic64_dec_if_positive
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long dec, c = atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
}
#endif
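
With the reworked fence hooks, an architecture needs to provide only the
_relaxed form of an operation (plus, optionally, its own fence definitions);
the ordered variants are composed here. Expanding the acquire case by hand for
an architecture that defines only atomic_fetch_add_relaxed() and no custom
fences, __atomic_op_acquire(atomic_fetch_add, i, v) reduces to (written as a
function for clarity):

    static inline int atomic_fetch_add_acquire(int i, atomic_t *v)
    {
    	int ret = atomic_fetch_add_relaxed(i, v);
    	smp_mb__after_atomic();		/* the default __atomic_acquire_fence */
    	return ret;
    }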
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..af419012d77d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -2,29 +2,9 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
+#include <linux/bits.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
- (((~0ULL) - (1ULL << (l)) + 1) & \
- (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
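
A second worked example of the mask construction, alongside the
GENMASK_ULL(39, 21) one in the comment above, assuming BITS_PER_LONG == 64:

    /*
     * GENMASK(7, 4):
     *   ~0UL - (1UL << 4) + 1            == 0xfffffffffffffff0  (clear bits < 4)
     *   ~0UL >> (BITS_PER_LONG - 1 - 7)  == 0x00000000000000ff  (keep bits <= 7)
     *   AND of the two                   == 0x00000000000000f0  (bits 7..4 set)
     */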
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index a685da2c4522..e28cce21bad6 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -3,9 +3,10 @@
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
+
+struct mutex;
/**
* struct refcount_t - variant of atomic_t specialized for reference counts
@@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
+extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
+extern void refcount_add_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
+extern void refcount_inc_checked(refcount_t *r);
+
+extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
+extern void refcount_dec_checked(refcount_t *r);
+
#ifdef CONFIG_REFCOUNT_FULL
-extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
-extern void refcount_add(unsigned int i, refcount_t *r);
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+#define refcount_add_not_zero refcount_add_not_zero_checked
+#define refcount_add refcount_add_checked
+
+#define refcount_inc_not_zero refcount_inc_not_zero_checked
+#define refcount_inc refcount_inc_checked
+
+#define refcount_sub_and_test refcount_sub_and_test_checked
-extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+#define refcount_dec_and_test refcount_dec_and_test_checked
+#define refcount_dec refcount_dec_checked
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
#else
# ifdef CONFIG_ARCH_HAS_REFCOUNT
# include <asm/refcount.h>
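
The checked (saturating, warning) implementations are now always exported
under the *_checked names; CONFIG_REFCOUNT_FULL merely aliases the plain names
to them. A caller that wants checking unconditionally can therefore call the
checked forms directly. An illustrative sketch (struct obj and obj_get() are
hypothetical):

    struct obj {
    	refcount_t refs;
    };

    static struct obj *obj_get(struct obj *o)
    {
    	/* checked form regardless of CONFIG_REFCOUNT_FULL */
    	return refcount_inc_not_zero_checked(&o->refs) ? o : NULL;
    }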
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e0f4f56c9310..dac5086e3815 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -167,8 +167,8 @@ struct task_group;
* need_sleep = false;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state() executes a full memory barrier before accessing the
+ * task state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
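
The reworded guarantee pins down where the barrier sits: the waker's store is
ordered before its access of the task state by a full barrier inside the
wakeup call. A sketch of the waker side, reusing need_sleep and p from the
surrounding comment in sched.h:

    /* Waker side, per the updated comment: */
    need_sleep = false;
    wake_up_state(p, TASK_UNINTERRUPTIBLE);
    	/* wake_up_state() executes a full memory barrier before it
    	 * accesses p->state, so a sleeper that observes its own state
    	 * change also observes need_sleep == false. */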
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index fd57888d4942..3190997df9ca 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -114,29 +114,48 @@ do { \
#endif /*arch_spin_is_contended*/
/*
- * This barrier must provide two things:
+ * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
+ * between program-order earlier lock acquisitions and program-order later
+ * memory accesses.
*
- * - it must guarantee a STORE before the spin_lock() is ordered against a
- * LOAD after it, see the comments at its two usage sites.
+ * This guarantees that the following two properties hold:
*
- * - it must ensure the critical section is RCsc.
+ * 1) Given the snippet:
*
- * The latter is important for cases where we observe values written by other
- * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ * { X = 0; Y = 0; }
*
- * CPU0 CPU1 CPU2
+ * CPU0 CPU1
*
- * for (;;) {
- * if (READ_ONCE(X))
- * break;
- * }
- * X=1
- * <sched-out>
- * <sched-in>
- * r = X;
+ * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1);
+ * spin_lock(S); smp_mb();
+ * smp_mb__after_spinlock(); r1 = READ_ONCE(X);
+ * r0 = READ_ONCE(Y);
+ * spin_unlock(S);
*
- * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
- * we get migrated and CPU2 sees X==0.
+ * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
+ * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
+ * preceding the call to smp_mb__after_spinlock() in __schedule() and in
+ * try_to_wake_up().
+ *
+ * 2) Given the snippet:
+ *
+ * { X = 0; Y = 0; }
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y);
+ * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb();
+ * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X);
+ * WRITE_ONCE(Y, 1);
+ * spin_unlock(S);
+ *
+ * it is forbidden that CPU0's critical section executes before CPU1's
+ * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
+ * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
+ * preceding the calls to smp_rmb() in try_to_wake_up() for similar
+ * snippets but "projected" onto two CPUs.
+ *
+ * Property (2) upgrades the lock to an RCsc lock.
*
* Since most load-store architectures implement ACQUIRE with an smp_mb() after
* the LL/SC loop, they need no further barriers. Similarly all our TSO