author    Paul E. McKenney <paulmck@linux.ibm.com>    2019-07-15 01:09:04 -0700
committer Paul E. McKenney <paulmck@linux.ibm.com>    2019-08-13 14:38:24 -0700
commit    1d5a81c18dc68fc38a52e8dab1992a043a358927 (patch)
tree      348ced2b4230c636b203b7213bf6b7a12226d4b9 /kernel/rcu
parent    rcu/nocb: Advance CBs after merge in rcutree_migrate_callbacks() (diff)
rcu/nocb: Reduce nocb_cb_wait() leaf rcu_node ->lock contention
Currently, nocb_cb_wait() advances callbacks on each pass through its loop, though only if it succeeds in conditionally acquiring its leaf rcu_node structure's ->lock. Despite the conditional acquisition of ->lock, this does increase contention. This commit therefore avoids advancing callbacks unless there are callbacks in ->cblist whose grace period has completed.

Note that nocb_cb_wait() doesn't worry about callbacks that have not yet been assigned a grace period. The idea is that the only reason for nocb_cb_wait() to advance callbacks is to allow it to continue invoking callbacks. Time will tell whether this is the correct choice.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
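As an illustration of the pattern only (not kernel code): below is a minimal user-space sketch, using hypothetical names (done_seq, pending_seq, have_pending, advance_ready_work), of gating a trylock behind a cheap lock-free readiness check. This is the same idea as the rcu_segcblist_nextgp()/rcu_seq_done() test that the patch places in front of raw_spin_trylock_rcu_node().

/*
 * Minimal user-space sketch of the gating pattern (hypothetical names,
 * not kernel APIs): contend for the shared lock only when a cheap,
 * lock-free check says there is work whose sequence number has already
 * completed.  Idle passes through the loop then skip the lock entirely.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong done_seq;        /* last completed sequence number */
static atomic_ulong pending_seq;     /* sequence our queued work waits on */
static atomic_bool have_pending;     /* is any work queued at all? */

static void advance_ready_work(void) /* runs under shared_lock */
{
	/* Move work whose sequence number has completed to a ready list. */
}

static void worker_pass(void)
{
	/* Cheap checks first; trylock only when advancing can make progress. */
	if (atomic_load(&have_pending) &&
	    atomic_load(&done_seq) >= atomic_load(&pending_seq) &&
	    pthread_mutex_trylock(&shared_lock) == 0) {
		advance_ready_work();
		pthread_mutex_unlock(&shared_lock);
	}
}

int main(void)
{
	worker_pass();	/* no pending work, so the lock is never touched */
	return 0;
}

Unlike the plain >= comparison in this sketch, the kernel's rcu_seq_done() compares grace-period sequence numbers in a wrap-safe way.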
Diffstat (limited to 'kernel/rcu')
-rw-r--r--    kernel/rcu/tree_plugin.h    5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 4b59ef1cbc8b..f6f23a16bd64 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2079,6 +2079,7 @@ static int rcu_nocb_gp_kthread(void *arg)
  */
 static void nocb_cb_wait(struct rcu_data *rdp)
 {
+	unsigned long cur_gp_seq;
 	unsigned long flags;
 	bool needwake_gp = false;
 	struct rcu_node *rnp = rdp->mynode;
@@ -2091,7 +2092,9 @@ static void nocb_cb_wait(struct rcu_data *rdp)
 	local_bh_enable();
 	lockdep_assert_irqs_enabled();
 	rcu_nocb_lock_irqsave(rdp, flags);
-	if (raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
+	if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
+	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
+	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
 		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 	}