author		Boqun Feng <boqun.feng@gmail.com>	2015-12-15 22:24:14 +0800
committer	Michael Ellerman <mpe@ellerman.id.au>	2016-02-18 00:11:02 +1100
commit		e1ab7f39d7e0dbfbdefe148be3ae4ee121e47ecc (patch)
tree		136a0781bf5788d595c85b887deef198ccaae789
parent		MAINTAINERS: Update EEH details and maintainership (diff)
atomics: Allow architectures to define their own __atomic_op_* helpers
Some architectures have special barriers for acquire, release and fence
semantics, so the general memory barriers (smp_mb__*_atomic()) used in the
default __atomic_op_*() helpers may be too strong. Allow architectures to
define their own __atomic_op_*() helpers, which override the defaults.

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
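For illustration, an architecture whose acquire/release barriers are cheaper
than a full smp_mb() can now define its own helpers in its asm/atomic.h,
which <linux/atomic.h> includes before evaluating the new #ifndef guards.
A minimal sketch, modeled on the powerpc override added later in this series
(arch/powerpc/include/asm/atomic.h; PPC_ACQUIRE_BARRIER and
PPC_RELEASE_BARRIER come from asm/synch.h):

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	/* lighter acquire barrier instead of a full smp_mb() */	\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	/* lighter release barrier instead of a full smp_mb() */	\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})

Because the generic definitions below are wrapped in #ifndef, these
arch-provided versions take precedence and the smp_mb__*_atomic()-based
defaults are skipped.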
-rw-r--r--	include/linux/atomic.h	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 301de78d65f7..5f3ee5a60a81 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -34,20 +34,29 @@
  * The idea here is to build acquire/release variants by adding explicit
  * barriers on top of the relaxed variant. In the case where the relaxed
  * variant is already fully ordered, no additional barriers are needed.
+ *
+ * Besides, if an arch has a special barrier for acquire/release, it could
+ * implement its own __atomic_op_* and use the same framework for building
+ * variants
  */
+#ifndef __atomic_op_acquire
 #define __atomic_op_acquire(op, args...) \
 ({ \
 	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
 	smp_mb__after_atomic(); \
 	__ret; \
 })
+#endif
 
+#ifndef __atomic_op_release
 #define __atomic_op_release(op, args...) \
 ({ \
 	smp_mb__before_atomic(); \
 	op##_relaxed(args); \
 })
+#endif
 
+#ifndef __atomic_op_fence
 #define __atomic_op_fence(op, args...) \
 ({ \
 	typeof(op##_relaxed(args)) __ret; \
@@ -56,6 +65,7 @@
 	smp_mb__after_atomic(); \
 	__ret; \
 })
+#endif
 
 /* atomic_add_return_relaxed */
 #ifndef atomic_add_return_relaxed
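The hunk ends just as the framework that consumes these helpers begins. For
reference, a sketch of how that framework builds the full set of variants
from a single relaxed implementation, reconstructed from the
include/linux/atomic.h of this era (not part of the patch itself):

#ifndef atomic_add_return_relaxed
/* arch provides only the fully ordered op; aliases cover all variants */
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

/* arch provides the relaxed op; synthesize the rest via the helpers */
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

A caller simply writes, say, atomic_add_return_acquire(i, v); if the
architecture supplies only atomic_add_return_relaxed(), the acquire variant
is synthesized through __atomic_op_acquire(), which after this patch may use
the architecture's own barrier rather than a full smp_mb().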