author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-05-01 11:07:23 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2018-07-12 15:38:42 -0700
commit		ab5e869c1f7aa30a1210f5e8a277758b0599609f (patch)
tree		2bb9514ce8374622c314371cfeaaf9ed82b9f079 /kernel/rcu
parent		rcu: Move from ->need_future_gp[] to ->gp_seq_needed (diff)
rcu: Make rcu_nocb_wait_gp() check if GP already requested
This commit makes rcu_nocb_wait_gp() check rdp->gp_seq_needed to see if the current CPU already knows that the needed grace period has been requested. If so, it avoids acquiring the corresponding leaf rcu_node structure's ->lock, thus decreasing contention. This optimization is intended for cases where either multiple leader rcuo kthreads are running on the same CPU or these kthreads are running on a non-offloaded (e.g., housekeeping) CPU.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
[ paulmck: Move lock release past "if" as suggested by Joel Fernandes. ]
[ paulmck: Fix caching of furthest-future requested grace period. ]
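
In outline, the patch turns the unconditional lock acquisition in rcu_nocb_wait_gp() into a check-then-lock pattern, and has rcu_start_this_gp() push the furthest-future requested grace period back into the per-CPU rcu_data structure so that later checks can succeed without the lock. The stand-alone sketch below models that pattern only; the names gp_root, gp_cpu, and request_gp are invented for illustration and do not appear in the kernel.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * User-space model of the optimization (illustration only, not kernel
 * code).  A per-CPU cache of the furthest-future grace period already
 * requested lets the common case skip the lock entirely.
 */
struct gp_root {
	pthread_mutex_t lock;
	unsigned long gp_seq_needed;	/* furthest GP requested so far */
};

struct gp_cpu {
	unsigned long gp_seq_needed;	/* this CPU's cached copy */
};

/* Request grace period c; return true if the locked slow path ran. */
static bool request_gp(struct gp_root *root, struct gp_cpu *cpu, unsigned long c)
{
	if (cpu->gp_seq_needed >= c)	/* kernel uses ULONG_CMP_GE(), see below */
		return false;		/* fast path: already requested, no lock */

	pthread_mutex_lock(&root->lock);
	if (root->gp_seq_needed < c)
		root->gp_seq_needed = c;	/* record the new request */
	/* Push the furthest requested GP back into the per-CPU cache. */
	cpu->gp_seq_needed = root->gp_seq_needed;
	pthread_mutex_unlock(&root->lock);
	return true;
}

int main(void)
{
	struct gp_root root = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct gp_cpu cpu = { 0 };

	printf("first request slow path: %d\n", request_gp(&root, &cpu, 4));
	printf("repeat request slow path: %d\n", request_gp(&root, &cpu, 4));
	printf("older request slow path: %d\n", request_gp(&root, &cpu, 3));
	return 0;
}

Built with cc -pthread, only the first call reports the slow path; the later requests are satisfied from the cached value, which is the reduction in leaf rcu_node ->lock contention this commit is after.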
Diffstat (limited to 'kernel/rcu')
-rw-r--r--	kernel/rcu/tree.c	5
-rw-r--r--	kernel/rcu/tree_plugin.h	15
2 files changed, 15 insertions, 5 deletions
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1ede51690e4a..4826598867c3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1618,6 +1618,11 @@ static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
 	ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
+	/* Push furthest requested GP to leaf node and rcu_data structure. */
+	if (ULONG_CMP_LT(c, rnp_root->gp_seq_needed)) {
+		rnp->gp_seq_needed = rnp_root->gp_seq_needed;
+		rdp->gp_seq_needed = rnp_root->gp_seq_needed;
+	}
 	if (rnp != rnp_root)
 		raw_spin_unlock_rcu_node(rnp_root);
 	return ret;
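
Both this hunk and the rcu_nocb_wait_gp() change below compare grace-period sequence numbers with ULONG_CMP_LT()/ULONG_CMP_GE() rather than plain relational operators, so the comparisons stay correct when ->gp_seq wraps around. The macros come from kernel/rcu/rcu.h; the definitions below are reproduced from memory and wrapped in a small user-space demo, so treat the snippet as illustrative rather than authoritative.

#include <limits.h>
#include <stdio.h>

/* Wraparound-safe comparisons, as defined in kernel/rcu/rcu.h. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long a = ULONG_MAX - 1;	/* just before wraparound */
	unsigned long b = 2;			/* just after wraparound */

	/* Plain "<" says a is not before b; the macro says it is. */
	printf("a < b: %d  ULONG_CMP_LT(a, b): %d\n",
	       a < b, ULONG_CMP_LT(a, b));
	printf("ULONG_CMP_GE(b, a): %d\n", ULONG_CMP_GE(b, a));
	return 0;
}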
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index f4a88e3c388d..ca73931f7b30 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2104,12 +2104,17 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	bool needwake;
 	struct rcu_node *rnp = rdp->mynode;
 
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	local_irq_save(flags);
 	c = rcu_seq_snap(&rdp->rsp->gp_seq);
-	needwake = rcu_start_this_gp(rnp, rdp, c);
-	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-	if (needwake)
-		rcu_gp_kthread_wake(rdp->rsp);
+	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+		local_irq_restore(flags);
+	} else {
+		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+		needwake = rcu_start_this_gp(rnp, rdp, c);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+		if (needwake)
+			rcu_gp_kthread_wake(rdp->rsp);
+	}
 
 	/*
 	 * Wait for the grace period. Do so interruptibly to avoid messing