path: root/kernel/rcu/tree.h
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2017-05-09 12:05:46 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2017-06-08 18:52:37 -0700
commit    bf32c76540257f9f5f2cf661dbdd8bb4a4bd8c82 (patch)
tree      b18a1c7394024abc97c2d778125e9c471b7fce1e /kernel/rcu/tree.h
parent    rcu: Refactor #includes from include/linux/rcupdate.h (diff)
rcu: Convert rnp->lock wrappers to macros for SRCU use
Use of smp_mb__after_unlock_lock() would allow SRCU to omit a full memory barrier during callback execution, so this commit converts the raw_spin_lock_rcu_node() family of wrappers from inline functions to type-generic macros, allowing them to handle locks in srcu_node structures as well as rcu_node structures.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
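The key point is that a macro, unlike an inline function, is not pinned to one parameter type: it works on any structure that has a suitably named lock field. The following is a minimal userspace sketch of that idea, not kernel code; struct a_node, struct b_node, and lock_node() are hypothetical stand-ins for rcu_node, srcu_node, and the wrappers in the patch below, using a pthread mutex in place of a raw spinlock.

/*
 * Sketch: one type-generic macro serving two structure types.
 * An inline function taking "struct a_node *" could not also
 * accept a "struct b_node *"; the macro expands per call site.
 */
#include <pthread.h>

struct a_node { pthread_mutex_t lock; };	/* stand-in for rcu_node */
struct b_node { pthread_mutex_t lock; };	/* stand-in for srcu_node */

#define lock_node(p)	pthread_mutex_lock(&(p)->lock)
#define unlock_node(p)	pthread_mutex_unlock(&(p)->lock)

int main(void)
{
	struct a_node a = { PTHREAD_MUTEX_INITIALIZER };
	struct b_node b = { PTHREAD_MUTEX_INITIALIZER };

	lock_node(&a);		/* expands against struct a_node */
	unlock_node(&a);
	lock_node(&b);		/* same macro, different type */
	unlock_node(&b);
	return 0;
}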
Diffstat (limited to 'kernel/rcu/tree.h')
-rw-r--r--    kernel/rcu/tree.h    47
1 file changed, 21 insertions(+), 26 deletions(-)
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index ddfa34d020ba..a7f63f1074b4 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -580,27 +580,22 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
* As ->lock of struct rcu_node is a __private field, therefore one should use
* these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
*/
-static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
-{
- raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
- smp_mb__after_unlock_lock();
-}
+#define raw_spin_lock_rcu_node(p) \
+do { \
+ raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
-static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
-{
- raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
-}
+#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))
-static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
-{
- raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
- smp_mb__after_unlock_lock();
-}
+#define raw_spin_lock_irq_rcu_node(p) \
+do { \
+ raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
-static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
-{
- raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
-}
+#define raw_spin_unlock_irq_rcu_node(p) \
+ raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
#define raw_spin_lock_irqsave_rcu_node(rnp, flags) \
do { \
@@ -615,11 +610,11 @@ do { \
raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags); \
} while (0)
-static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
-{
- bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
-
- if (locked)
- smp_mb__after_unlock_lock();
- return locked;
-}
+#define raw_spin_trylock_rcu_node(p) \
+({ \
+ bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
+ \
+ if (___locked) \
+ smp_mb__after_unlock_lock(); \
+ ___locked; \
+})
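The trylock wrapper in the last hunk differs from the others: it must yield a value, so it uses a GNU C statement expression, ({ ... }), whose result is the value of its final statement. Below is a minimal userspace sketch of that pattern under the same assumptions as before; try_lock_node() and struct node are hypothetical, pthread_mutex_trylock() stands in for raw_spin_trylock(), and __sync_synchronize() stands in for smp_mb__after_unlock_lock().

/*
 * Sketch: a statement-expression macro that both conditionally
 * executes a memory barrier and "returns" whether the lock was
 * acquired, mirroring raw_spin_trylock_rcu_node() above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct node { pthread_mutex_t lock; };

#define try_lock_node(p)						\
({									\
	bool ___locked = (pthread_mutex_trylock(&(p)->lock) == 0);	\
									\
	if (___locked)							\
		__sync_synchronize();	/* full barrier on success */	\
	___locked;		/* value of the whole expression */	\
})

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER };

	if (try_lock_node(&n)) {
		printf("acquired\n");
		pthread_mutex_unlock(&n.lock);
	}
	return 0;
}

The triple-underscore name ___locked follows the kernel convention for macro-local temporaries, reducing the chance of shadowing a variable at the call site.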