Diffstat (limited to 'kernel/rcu/tree_plugin.h')
 kernel/rcu/tree_plugin.h | 249 +++++++++++++++++++++++++++++-----------------
 1 file changed, 164 insertions(+), 85 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a32494c4b6f6..ad0156b86937 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -16,8 +16,70 @@
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+ return lockdep_is_held(&rdp->nocb_lock);
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+ /* Race on early boot between thread creation and assignment */
+ if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
+ return true;
+
+ if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
+ if (in_task())
+ return true;
+ return false;
+}
+
+static inline bool rcu_running_nocb_timer(struct rcu_data *rdp)
+{
+ return (timer_curr_running(&rdp->nocb_timer) && !in_irq());
+}
+#else
+static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
+{
+ return 0;
+}
+
+static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
+{
+ return false;
+}
+
+static inline bool rcu_running_nocb_timer(struct rcu_data *rdp)
+{
+ return false;
+}
+
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
+{
+ /*
+ * In order to read the offloaded state of an rdp in a safe
+ * and stable way and prevent its value from being changed
+ * under us, we must either hold the barrier mutex, the cpu
+ * hotplug lock (read or write) or the nocb lock. Local
+ * non-preemptible reads are also safe. NOCB kthreads and
+ * timers have their own means of synchronization against the
+ * offloaded state updaters.
+ */
+ RCU_LOCKDEP_WARN(
+ !(lockdep_is_held(&rcu_state.barrier_mutex) ||
+ (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
+ rcu_lockdep_is_held_nocb(rdp) ||
+ (rdp == this_cpu_ptr(&rcu_data) &&
+ !(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible())) ||
+ rcu_current_is_nocb_kthread(rdp) ||
+ rcu_running_nocb_timer(rdp)),
+ "Unsafe read of RCU_NOCB offloaded state"
+ );
+
+ return rcu_segcblist_is_offloaded(&rdp->cblist);
+}
+
/*
* Check the RCU kernel configuration parameters and print informative
* messages about anything out of the ordinary.
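
For context, a minimal caller sketch (not part of this patch) of one pattern the lockdep check in rcu_rdp_is_offloaded() accepts: a local, non-preemptible read of the offloaded state. The helper name and the pr_debug() are purely illustrative.

static void example_local_offload_check(void)
{
	struct rcu_data *rdp;
	bool offloaded;

	/* Disabling preemption satisfies the "local non-preemptible read" case. */
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_data);
	offloaded = rcu_rdp_is_offloaded(rdp);
	preempt_enable();

	if (offloaded)
		pr_debug("CPU %d handles its RCU callbacks via nocb kthreads\n",
			 rdp->cpu);
}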
@@ -599,9 +661,9 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
static void rcu_read_unlock_special(struct task_struct *t)
{
unsigned long flags;
+ bool irqs_were_disabled;
bool preempt_bh_were_disabled =
!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
- bool irqs_were_disabled;
/* NMI handlers cannot block and cannot safely manipulate state. */
if (in_nmi())
@@ -610,30 +672,33 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags);
if (preempt_bh_were_disabled || irqs_were_disabled) {
- bool exp;
+ bool expboost; // Expedited GP in flight or possible boosting.
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;
- exp = (t->rcu_blocked_node &&
- READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
- (rdp->grpmask & READ_ONCE(rnp->expmask));
+ expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
+ (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
+ IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
+ (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
+ t->rcu_blocked_node);
// Need to defer quiescent state until everything is enabled.
- if (use_softirq && (in_irq() || (exp && !irqs_were_disabled))) {
+ if (use_softirq && (in_irq() || (expboost && !irqs_were_disabled))) {
// Using softirq, safe to awaken, and either the
- // wakeup is free or there is an expedited GP.
+ // wakeup is free or there is an expedited
+ // GP in flight or a potential need to deboost.
raise_softirq_irqoff(RCU_SOFTIRQ);
} else {
// Enabling BH or preempt does reschedule, so...
- // Also if no expediting, slow is OK.
- // Plus nohz_full CPUs eventually get tick enabled.
+ // Also if no expediting and no possible deboosting,
+ // slow is OK. Plus nohz_full CPUs eventually get
+ // tick enabled.
set_tsk_need_resched(current);
set_preempt_need_resched();
if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
- !rdp->defer_qs_iw_pending && exp && cpu_online(rdp->cpu)) {
+ expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
// Get scheduler to re-evaluate and call hooks.
// If !IRQ_WORK, FQS scan will eventually IPI.
- init_irq_work(&rdp->defer_qs_iw,
- rcu_preempt_deferred_qs_handler);
+ init_irq_work(&rdp->defer_qs_iw, rcu_preempt_deferred_qs_handler);
rdp->defer_qs_iw_pending = true;
irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
}
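
As an aside, the new expboost condition folds four cases into one expression; the sketch below (not part of the patch) restates it as a standalone predicate with each case annotated. The helper name is hypothetical.

static bool example_expboost(struct task_struct *t, struct rcu_data *rdp,
			     struct rcu_node *rnp, bool irqs_were_disabled)
{
	/* Blocked in a read-side section that an expedited GP is waiting on. */
	if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks))
		return true;
	/* This CPU itself still blocks an expedited grace period. */
	if (rdp->grpmask & READ_ONCE(rnp->expmask))
		return true;
	/* Strict grace periods always want a prompt quiescent state. */
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		return true;
	/* Priority boosting may be in effect, so deboosting may be needed. */
	return IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
	       t->rcu_blocked_node;
}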
@@ -1258,7 +1323,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
*nextevt = KTIME_MAX;
return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
- !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist);
+ !rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}
/*
@@ -1353,7 +1418,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
/* If no non-offloaded callbacks, RCU doesn't need the CPU. */
if (rcu_segcblist_empty(&rdp->cblist) ||
- rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) {
+ rcu_rdp_is_offloaded(rdp)) {
*nextevt = KTIME_MAX;
return 0;
}
@@ -1389,7 +1454,7 @@ static void rcu_prepare_for_idle(void)
int tne;
lockdep_assert_irqs_disabled();
- if (rcu_segcblist_is_offloaded(&rdp->cblist))
+ if (rcu_rdp_is_offloaded(rdp))
return;
/* Handle nohz enablement switches conservatively. */
@@ -1430,7 +1495,7 @@ static void rcu_cleanup_after_idle(void)
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
lockdep_assert_irqs_disabled();
- if (rcu_segcblist_is_offloaded(&rdp->cblist))
+ if (rcu_rdp_is_offloaded(rdp))
return;
if (rcu_try_advance_all_cbs())
invoke_rcu_core();
@@ -1465,14 +1530,12 @@ static void rcu_cleanup_after_idle(void)
/*
* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
- * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
- * comma-separated list of CPUs and/or CPU ranges. If an invalid list is
- * given, a warning is emitted and all CPUs are offloaded.
+ * If the list is invalid, a warning is emitted and all CPUs are offloaded.
*/
static int __init rcu_nocb_setup(char *str)
{
alloc_bootmem_cpumask_var(&rcu_nocb_mask);
- if (!strcasecmp(str, "all"))
+ if (!strcasecmp(str, "all")) /* legacy: use "0-N" instead */
cpumask_setall(rcu_nocb_mask);
else
if (cpulist_parse(str, rcu_nocb_mask)) {
@@ -1495,7 +1558,7 @@ early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
* After all, the main point of bypassing is to avoid lock contention
* on ->nocb_lock, which only can happen at high call_rcu() rates.
*/
-int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
+static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);
/*
@@ -1561,7 +1624,7 @@ static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
static void rcu_nocb_lock(struct rcu_data *rdp)
{
lockdep_assert_irqs_disabled();
- if (!rcu_segcblist_is_offloaded(&rdp->cblist))
+ if (!rcu_rdp_is_offloaded(rdp))
return;
raw_spin_lock(&rdp->nocb_lock);
}
@@ -1572,7 +1635,7 @@ static void rcu_nocb_lock(struct rcu_data *rdp)
*/
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
- if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
+ if (rcu_rdp_is_offloaded(rdp)) {
lockdep_assert_irqs_disabled();
raw_spin_unlock(&rdp->nocb_lock);
}
@@ -1585,7 +1648,7 @@ static void rcu_nocb_unlock(struct rcu_data *rdp)
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
unsigned long flags)
{
- if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
+ if (rcu_rdp_is_offloaded(rdp)) {
lockdep_assert_irqs_disabled();
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
} else {
@@ -1597,7 +1660,7 @@ static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
lockdep_assert_irqs_disabled();
- if (rcu_segcblist_is_offloaded(&rdp->cblist))
+ if (rcu_rdp_is_offloaded(rdp))
lockdep_assert_held(&rdp->nocb_lock);
}
@@ -1642,12 +1705,16 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
lockdep_assert_held(&rdp->nocb_lock);
if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("AlreadyAwake"));
- rcu_nocb_unlock_irqrestore(rdp, flags);
return false;
}
- del_timer(&rdp->nocb_timer);
+
+ if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
+ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
+ del_timer(&rdp->nocb_timer);
+ }
rcu_nocb_unlock_irqrestore(rdp, flags);
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
@@ -1691,7 +1758,7 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
{
struct rcu_cblist rcl;
- WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist));
+ WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
rcu_lockdep_assert_cblist_protected(rdp);
lockdep_assert_held(&rdp->nocb_bypass_lock);
if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
@@ -1719,7 +1786,7 @@ static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j)
{
- if (!rcu_segcblist_is_offloaded(&rdp->cblist))
+ if (!rcu_rdp_is_offloaded(rdp))
return true;
rcu_lockdep_assert_cblist_protected(rdp);
rcu_nocb_bypass_lock(rdp);
@@ -1733,7 +1800,7 @@ static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
rcu_lockdep_assert_cblist_protected(rdp);
- if (!rcu_segcblist_is_offloaded(&rdp->cblist) ||
+ if (!rcu_rdp_is_offloaded(rdp) ||
!rcu_nocb_bypass_trylock(rdp))
return;
WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
@@ -1765,11 +1832,22 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
unsigned long j = jiffies;
long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
- if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
+ lockdep_assert_irqs_disabled();
+
+ // Pure softirq/rcuc based processing: no bypassing, no
+ // locking.
+ if (!rcu_rdp_is_offloaded(rdp)) {
+ *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+ return false;
+ }
+
+ // In the process of (de-)offloading: no bypassing, but
+ // locking.
+ if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
+ rcu_nocb_lock(rdp);
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
return false; /* Not offloaded, no bypassing. */
}
- lockdep_assert_irqs_disabled();
// Don't use ->nocb_bypass during early boot.
if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
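
To summarize the dispatch now at the top of rcu_nocb_try_bypass(), here is a hypothetical classifier (not part of the patch) naming the three cases it distinguishes:

enum example_bypass_case {
	EXAMPLE_SOFTIRQ_ONLY,	/* not offloaded: no nocb locking, no bypass */
	EXAMPLE_TRANSITIONING,	/* (de-)offloading: take nocb lock, no bypass */
	EXAMPLE_OFFLOADED,	/* fully offloaded: bypass list may be used */
};

static enum example_bypass_case example_classify(struct rcu_data *rdp)
{
	if (!rcu_rdp_is_offloaded(rdp))
		return EXAMPLE_SOFTIRQ_ONLY;
	if (!rcu_segcblist_completely_offloaded(&rdp->cblist))
		return EXAMPLE_TRANSITIONING;
	return EXAMPLE_OFFLOADED;
}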
@@ -1879,9 +1957,9 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
// If we are being polled or there is no kthread, just leave.
t = READ_ONCE(rdp->nocb_gp_kthread);
if (rcu_nocb_poll || !t) {
+ rcu_nocb_unlock_irqrestore(rdp, flags);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
TPS("WakeNotPoll"));
- rcu_nocb_unlock_irqrestore(rdp, flags);
return;
}
// Need to actually do a wakeup.
@@ -1916,8 +1994,8 @@ static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
TPS("WakeOvfIsDeferred"));
rcu_nocb_unlock_irqrestore(rdp, flags);
} else {
- trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
rcu_nocb_unlock_irqrestore(rdp, flags);
+ trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
}
return;
}
@@ -1955,7 +2033,8 @@ static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
return rcu_segcblist_test_flags(&rdp->cblist, flags);
}
-static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_state)
+static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
+ bool *needwake_state)
{
struct rcu_segcblist *cblist = &rdp->cblist;
@@ -1965,7 +2044,7 @@ static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_sta
if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
*needwake_state = true;
}
- return true;
+ return false;
}
/*
@@ -1976,7 +2055,7 @@ static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_sta
rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
*needwake_state = true;
- return false;
+ return true;
}
@@ -2014,7 +2093,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
rcu_nocb_lock_irqsave(rdp, flags);
- if (!nocb_gp_update_state(rdp, &needwake_state)) {
+ if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
rcu_nocb_unlock_irqrestore(rdp, flags);
if (needwake_state)
swake_up_one(&rdp->nocb_state_wq);
@@ -2169,11 +2248,18 @@ static void nocb_cb_wait(struct rcu_data *rdp)
unsigned long flags;
bool needwake_state = false;
bool needwake_gp = false;
+ bool can_sleep = true;
struct rcu_node *rnp = rdp->mynode;
local_irq_save(flags);
rcu_momentary_dyntick_idle();
local_irq_restore(flags);
+ /*
+ * Disable BH to provide the expected environment. Also, when
+ * transitioning to/from NOCB mode, a self-requeuing callback might
+ * be invoked from softirq. A short grace period could cause both
+ * instances of this callback to execute concurrently.
+ */
local_bh_disable();
rcu_do_batch(rdp);
local_bh_enable();
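
The new comment above refers to self-requeuing callbacks; a minimal, purely illustrative example of such a callback is:

static void example_selfrequeue_cb(struct rcu_head *rhp)
{
	/*
	 * Re-posts itself. During an offload/de-offload transition, one
	 * instance can be invoked from softirq while the next is invoked
	 * by the nocb kthread; keeping BH disabled around rcu_do_batch()
	 * keeps the two from running concurrently after a short grace period.
	 */
	call_rcu(rhp, example_selfrequeue_cb);
}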
@@ -2186,8 +2272,6 @@ static void nocb_cb_wait(struct rcu_data *rdp)
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
}
- WRITE_ONCE(rdp->nocb_cb_sleep, true);
-
if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
@@ -2195,7 +2279,7 @@ static void nocb_cb_wait(struct rcu_data *rdp)
needwake_state = true;
}
if (rcu_segcblist_ready_cbs(cblist))
- WRITE_ONCE(rdp->nocb_cb_sleep, false);
+ can_sleep = false;
} else {
/*
* De-offloading. Clear our flag and notify the de-offload worker.
@@ -2208,6 +2292,8 @@ static void nocb_cb_wait(struct rcu_data *rdp)
needwake_state = true;
}
+ WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
+
if (rdp->nocb_cb_sleep)
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
@@ -2266,7 +2352,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
return false;
}
ndw = READ_ONCE(rdp->nocb_defer_wakeup);
- WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
@@ -2332,24 +2417,28 @@ static int rdp_offload_toggle(struct rcu_data *rdp,
return 0;
}
-static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
+static long rcu_nocb_rdp_deoffload(void *arg)
{
+ struct rcu_data *rdp = arg;
struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long flags;
int ret;
+ WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
+
pr_info("De-offloading %d\n", rdp->cpu);
rcu_nocb_lock_irqsave(rdp, flags);
/*
- * If there are still pending work offloaded, the offline
- * CPU won't help much handling them.
+ * Flush once and for all now. This suffices because we are
+ * running on the target CPU holding ->nocb_lock (thus having
+ * interrupts disabled), and because rdp_offload_toggle()
+ * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
+ * Thus future calls to rcu_segcblist_completely_offloaded() will
+ * return false, which means that future calls to rcu_nocb_try_bypass()
+ * will refuse to put anything into the bypass.
*/
- if (cpu_is_offline(rdp->cpu) && !rcu_segcblist_empty(&rdp->cblist)) {
- rcu_nocb_unlock_irqrestore(rdp, flags);
- return -EBUSY;
- }
-
+ WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
ret = rdp_offload_toggle(rdp, false, flags);
swait_event_exclusive(rdp->nocb_state_wq,
!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
@@ -2361,30 +2450,22 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
del_timer_sync(&rdp->nocb_timer);
/*
- * Flush bypass. While IRQs are disabled and once we set
- * SEGCBLIST_SOFTIRQ_ONLY, no callback is supposed to be
- * enqueued on bypass.
+ * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY with CB unlocked
+ * and IRQs disabled but let's be paranoid.
*/
rcu_nocb_lock_irqsave(rdp, flags);
- rcu_nocb_flush_bypass(rdp, NULL, jiffies);
rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
/*
* With SEGCBLIST_SOFTIRQ_ONLY, we can't use
- * rcu_nocb_unlock_irqrestore() anymore. Theoretically we
- * could set SEGCBLIST_SOFTIRQ_ONLY with cb unlocked and IRQs
- * disabled now, but let's be paranoid.
+ * rcu_nocb_unlock_irqrestore() anymore.
*/
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
- return ret;
-}
+ /* Sanity check */
+ WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
-static long rcu_nocb_rdp_deoffload(void *arg)
-{
- struct rcu_data *rdp = arg;
- WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
- return __rcu_nocb_rdp_deoffload(rdp);
+ return ret;
}
int rcu_nocb_cpu_deoffload(int cpu)
@@ -2398,13 +2479,15 @@ int rcu_nocb_cpu_deoffload(int cpu)
}
mutex_lock(&rcu_state.barrier_mutex);
cpus_read_lock();
- if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
- if (cpu_online(cpu))
+ if (rcu_rdp_is_offloaded(rdp)) {
+ if (cpu_online(cpu)) {
ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
- else
- ret = __rcu_nocb_rdp_deoffload(rdp);
- if (!ret)
- cpumask_clear_cpu(cpu, rcu_nocb_mask);
+ if (!ret)
+ cpumask_clear_cpu(cpu, rcu_nocb_mask);
+ } else {
+ pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
+ ret = -EINVAL;
+ }
}
cpus_read_unlock();
mutex_unlock(&rcu_state.barrier_mutex);
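
The hunk above relies on the work_on_cpu() pattern: the de-offload handler runs on the target CPU and its return value is handed back to the caller. A stripped-down sketch of that pattern (illustrative names only, not part of the patch):

static long example_on_target_cpu(void *arg)
{
	struct rcu_data *rdp = arg;

	/* Runs in a workqueue worker bound to rdp->cpu. */
	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
	return 0;
}

static int example_run_on_cpu(int cpu)
{
	int ret = -EINVAL;

	cpus_read_lock();	/* Keep the CPU from going away under us. */
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, example_on_target_cpu,
				  per_cpu_ptr(&rcu_data, cpu));
	cpus_read_unlock();
	return ret;
}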
@@ -2413,12 +2496,14 @@ int rcu_nocb_cpu_deoffload(int cpu)
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
-static int __rcu_nocb_rdp_offload(struct rcu_data *rdp)
+static long rcu_nocb_rdp_offload(void *arg)
{
+ struct rcu_data *rdp = arg;
struct rcu_segcblist *cblist = &rdp->cblist;
unsigned long flags;
int ret;
+ WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
/*
* For now we only support re-offload, ie: the rdp must have been
* offloaded on boot first.
@@ -2458,14 +2543,6 @@ static int __rcu_nocb_rdp_offload(struct rcu_data *rdp)
return ret;
}
-static long rcu_nocb_rdp_offload(void *arg)
-{
- struct rcu_data *rdp = arg;
-
- WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
- return __rcu_nocb_rdp_offload(rdp);
-}
-
int rcu_nocb_cpu_offload(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
@@ -2473,13 +2550,15 @@ int rcu_nocb_cpu_offload(int cpu)
mutex_lock(&rcu_state.barrier_mutex);
cpus_read_lock();
- if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
- if (cpu_online(cpu))
+ if (!rcu_rdp_is_offloaded(rdp)) {
+ if (cpu_online(cpu)) {
ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
- else
- ret = __rcu_nocb_rdp_offload(rdp);
- if (!ret)
- cpumask_set_cpu(cpu, rcu_nocb_mask);
+ if (!ret)
+ cpumask_set_cpu(cpu, rcu_nocb_mask);
+ } else {
+ pr_info("NOCB: Can't CB-offload an offline CPU\n");
+ ret = -EINVAL;
+ }
}
cpus_read_unlock();
mutex_unlock(&rcu_state.barrier_mutex);