author		Frederic Weisbecker <frederic@kernel.org>	2021-02-23 01:10:00 +0100
committer	Paul E. McKenney <paulmck@kernel.org>		2021-03-15 13:54:54 -0700
commit		76d00b494d7962e88d4bbd4135f34aba9019c67f (patch)
tree		82c01fb3a120706461a1f752d96b3baf7174ae13
parent		rcu/nocb: Fix missed nocb_timer requeue (diff)
rcu/nocb: Disable bypass when CPU isn't completely offloaded
Currently, the bypass is flushed at the very last moment in the
deoffloading procedure. However, this approach leads to a larger state
space than would be preferred. This commit therefore disables the
bypass as soon as the deoffloading procedure begins, then flushes it.
This guarantees that the bypass remains empty and thus out of the way
of the deoffloading procedure. Symmetrically, this commit waits to
enable the bypass until the offloading procedure has completed.

Reported-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
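The idea, in outline: once a CPU starts deoffloading, new callbacks must no
longer be able to land in the bypass list, so the bypass is disabled before
the final flush and therefore stays empty afterwards. Below is a minimal
userspace sketch of that ordering only, not the kernel code: the names
rdp_sketch, fully_offloaded and bypass_cbs are invented stand-ins, and a
plain pthread mutex replaces ->nocb_lock and the rcu_segcblist flag
machinery that the actual patch below relies on.

	/*
	 * Sketch only (build with -pthread): disable-then-flush ordering.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct rdp_sketch {
		pthread_mutex_t lock;	/* stands in for ->nocb_lock */
		bool fully_offloaded;	/* stands in for rcu_segcblist_completely_offloaded() */
		int bypass_cbs;		/* stands in for the ->nocb_bypass callback count */
	};

	/* Enqueue path: use the bypass only while the CPU is completely offloaded. */
	static bool try_bypass(struct rdp_sketch *rdp)
	{
		bool queued;

		pthread_mutex_lock(&rdp->lock);
		queued = rdp->fully_offloaded;
		if (queued)
			rdp->bypass_cbs++;
		pthread_mutex_unlock(&rdp->lock);
		return queued;
	}

	/* Deoffload path: disable the bypass first, then flush it once and for all. */
	static void deoffload(struct rdp_sketch *rdp)
	{
		pthread_mutex_lock(&rdp->lock);
		rdp->fully_offloaded = false;	/* later try_bypass() calls refuse the bypass */
		rdp->bypass_cbs = 0;		/* "flush": nothing can refill the bypass now */
		pthread_mutex_unlock(&rdp->lock);
	}

	int main(void)
	{
		struct rdp_sketch rdp = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.fully_offloaded = true,
		};

		printf("bypass used: %d\n", try_bypass(&rdp));	/* 1: fully offloaded */
		deoffload(&rdp);
		printf("bypass used: %d\n", try_bypass(&rdp));	/* 0: (de-)offloading */
		return 0;
	}

In the real patch, the "disable" step happens implicitly: rdp_offload_toggle()
clears the offloaded state, so rcu_nocb_try_bypass() stops accepting bypass
enqueues, and the single flush under ->nocb_lock then suffices.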
-rw-r--r--	include/linux/rcu_segcblist.h	 7
-rw-r--r--	kernel/rcu/tree_plugin.h	38
2 files changed, 33 insertions, 12 deletions
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 8afe886e85f1..3db96c4f45fd 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -109,7 +109,7 @@ struct rcu_cblist {
* | SEGCBLIST_KTHREAD_GP |
* | |
* | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
- * | handling callbacks. |
+ * | handling callbacks. Enable bypass queueing. |
* ----------------------------------------------------------------------------
*/
@@ -125,7 +125,7 @@ struct rcu_cblist {
* | SEGCBLIST_KTHREAD_GP |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
- * | ignores callbacks. |
+ * | ignores callbacks. Bypass enqueue is enabled. |
* ----------------------------------------------------------------------------
* |
* v
@@ -134,7 +134,8 @@ struct rcu_cblist {
* | SEGCBLIST_KTHREAD_GP |
* | |
* | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
- * | holding nocb_lock. Wake up CB and GP kthreads if necessary. |
+ * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
+ * | bypass enqueue. |
* ----------------------------------------------------------------------------
* |
* v
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index e392bd129316..b08564b2bcf7 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -1830,11 +1830,22 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j = jiffies;
long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
+ lockdep_assert_irqs_disabled();
+
+ // Pure softirq/rcuc based processing: no bypassing, no
+ // locking.
if (!rcu_rdp_is_offloaded(rdp)) {
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+ return false;
+ }
+
+ // In the process of (de-)offloading: no bypassing, but
+ // locking.
+ if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
+ rcu_nocb_lock(rdp);
+ *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
return false; /* Not offloaded, no bypassing. */
}
- lockdep_assert_irqs_disabled();
// Don't use ->nocb_bypass during early boot.
if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
@@ -2416,7 +2427,16 @@ static long rcu_nocb_rdp_deoffload(void *arg)
pr_info("De-offloading %d\n", rdp->cpu);
rcu_nocb_lock_irqsave(rdp, flags);
-
+ /*
+ * Flush once and for all now. This suffices because we are
+ * running on the target CPU holding ->nocb_lock (thus having
+ * interrupts disabled), and because rdp_offload_toggle()
+ * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
+ * Thus future calls to rcu_segcblist_completely_offloaded() will
+ * return false, which means that future calls to rcu_nocb_try_bypass()
+ * will refuse to put anything into the bypass.
+ */
+ WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
ret = rdp_offload_toggle(rdp, false, flags);
swait_event_exclusive(rdp->nocb_state_wq,
!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
@@ -2428,21 +2448,21 @@ static long rcu_nocb_rdp_deoffload(void *arg)
del_timer_sync(&rdp->nocb_timer);
/*
- * Flush bypass. While IRQs are disabled and once we set
- * SEGCBLIST_SOFTIRQ_ONLY, no callback is supposed to be
- * enqueued on bypass.
+ * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY with CB unlocked
+ * and IRQs disabled but let's be paranoid.
*/
rcu_nocb_lock_irqsave(rdp, flags);
- rcu_nocb_flush_bypass(rdp, NULL, jiffies);
rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
/*
* With SEGCBLIST_SOFTIRQ_ONLY, we can't use
- * rcu_nocb_unlock_irqrestore() anymore. Theoretically we
- * could set SEGCBLIST_SOFTIRQ_ONLY with cb unlocked and IRQs
- * disabled now, but let's be paranoid.
+ * rcu_nocb_unlock_irqrestore() anymore.
*/
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
+ /* Sanity check */
+ WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+
+
return ret;
}