path: root/include/asm-generic/bitops/atomic.h
author		Mark Rutland <mark.rutland@arm.com>	2021-07-13 11:52:53 +0100
committer	Peter Zijlstra <peterz@infradead.org>	2021-07-16 18:46:45 +0200
commit		cf3ee3c8c29dc349b2cf52e5e72e8cb805ff5e57 (patch)
tree		3a341d32735ffce29fa67bc5bd0f84eb7fe2423d /include/asm-generic/bitops/atomic.h
parent		locking/atomic: add arch_atomic_long*() (diff)
download	linux-dev-cf3ee3c8c29dc349b2cf52e5e72e8cb805ff5e57.tar.xz
		linux-dev-cf3ee3c8c29dc349b2cf52e5e72e8cb805ff5e57.zip
locking/atomic: add generic arch_*() bitops
Now that all architectures provide arch_atomic_long_*(), we can build the generic arch_*() bitops atop these, which can then be safely used in noinstr code. The regular bitop wrappers are built atop the arch_*() forms.

As the generic non-atomic bitops use plain accesses, these will be implicitly instrumented unless they are inlined into noinstr functions (which is similar to arch_atomic*_read() when based on READ_ONCE()). The wrappers are modified so that where the underlying arch_*() function uses a plain access, no explicit instrumentation is added, as this is redundant and could result in confusing reports.

Since function prototypes get excessively long with both an `arch_` prefix and `__always_inline` attribute, the return type and function attributes have been split onto a separate line, matching the style of the generated atomic headers.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-6-mark.rutland@arm.com
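For context, the instrumented wrappers built atop the arch_*() forms follow a pattern like the sketch below, modeled on include/asm-generic/bitops/instrumented-atomic.h (a sketch, not verbatim from this patch):

static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
	/* Report the atomic RMW access to KASAN/KCSAN, then defer to
	 * the arch_*() form introduced by this patch. */
	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
	arch_set_bit(nr, addr);
}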
Diffstat (limited to 'include/asm-generic/bitops/atomic.h')
-rw-r--r--	include/asm-generic/bitops/atomic.h	32
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 0e7316a86240..3096f086b5a3 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,25 +11,29 @@
* See Documentation/atomic_bitops.txt for details.
*/
-static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+ arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
@@ -38,11 +42,12 @@ static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
if (READ_ONCE(*p) & mask)
return 1;
- old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
@@ -51,18 +56,21 @@ static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
if (!(READ_ONCE(*p) & mask))
return 0;
- old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
return !!(old & mask);
}
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
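The payoff is that noinstr code can now call the arch_*() bitops directly, avoiding instrumentation calls that would be unsafe there. A hypothetical caller (the function name and purpose are illustrative, not part of this patch):

#include <linux/bits.h>

/* Hypothetical example: code marked noinstr (e.g. an early entry
 * path) must not emit KASAN/KCSAN instrumentation, so it uses the
 * arch_*() form rather than the instrumented set_bit() wrapper. */
noinstr void example_mark_entry(volatile unsigned long *mask, unsigned int cpu)
{
	arch_set_bit(cpu, mask);
}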