author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-08-03 21:00:38 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2018-08-30 16:03:50 -0700
commit     2dba13f0b6c2b26ff371b8927ac58d20a7d94713 (patch)
tree       fa3fcb58680ea2b5afc82f91b4d148234ea78114 /kernel/rcu/tree_plugin.h
parent     rcu: Switch lazy counts to rcu_data structure (diff)
rcu: Switch urgent quiescent-state requests to rcu_data structure
This commit removes ->rcu_need_heavy_qs and ->rcu_urgent_qs from the rcu_dynticks structure and updates the code to access them from the rcu_data structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
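For orientation, here is a minimal sketch of the field move described above. The structure layouts are heavily simplified and hypothetical; the real rcu_dynticks and rcu_data structures carry many more members and are declared as per-CPU variables in the kernel.

#include <stdbool.h>

/* Sketch only: before this commit, the two urgent quiescent-state
 * flags lived in the per-CPU rcu_dynticks structure. */
struct rcu_dynticks {
        bool rcu_need_heavy_qs;  /* GP old, so a heavy quiescent state is wanted. */
        bool rcu_urgent_qs;      /* GP old, so any quiescent state is wanted. */
};

/* After this commit, the same two flags are reached through the
 * per-CPU rcu_data structure instead. */
struct rcu_data {
        bool rcu_need_heavy_qs;
        bool rcu_urgent_qs;
};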
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--  kernel/rcu/tree_plugin.h | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index b5aeb2fe4cfe..161760957a07 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -967,17 +967,17 @@ void rcu_all_qs(void)
 {
        unsigned long flags;

-       if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
+       if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
                return;
        preempt_disable();
        /* Load rcu_urgent_qs before other flags. */
-       if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
+       if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
                preempt_enable();
                return;
        }
-       this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
+       this_cpu_write(rcu_data.rcu_urgent_qs, false);
        barrier(); /* Avoid RCU read-side critical sections leaking down. */
-       if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
+       if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
                local_irq_save(flags);
                rcu_momentary_dyntick_idle();
                local_irq_restore(flags);
@@ -997,10 +997,10 @@ void rcu_note_context_switch(bool preempt)
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_qs();
        /* Load rcu_urgent_qs before other flags. */
-       if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
+       if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
                goto out;
-       this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
-       if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
+       this_cpu_write(rcu_data.rcu_urgent_qs, false);
+       if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
                rcu_momentary_dyntick_idle();
        if (!preempt)
                rcu_tasks_qs(current);
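Both hunks keep the existing access pattern intact and change only which per-CPU structure is named. As a rough illustration of that pattern (acquire-load the urgent flag before inspecting anything else, then clear it), here is a user-space approximation using C11 atomics in place of the kernel's this_cpu_*() accessors and smp_load_acquire(); the variable and function names below are hypothetical, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the per-CPU rcu_data flags. */
static _Atomic bool urgent_qs;
static _Atomic bool need_heavy_qs;

static void note_urgent_qs_sketch(void)
{
        /* Acquire-load the urgent flag before reading the other flag,
         * mirroring the smp_load_acquire() calls in the hunks above. */
        if (!atomic_load_explicit(&urgent_qs, memory_order_acquire))
                return;
        atomic_store_explicit(&urgent_qs, false, memory_order_relaxed);
        if (atomic_load_explicit(&need_heavy_qs, memory_order_relaxed)) {
                /* The kernel would call rcu_momentary_dyntick_idle() here. */
        }
}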