author     Paul E. McKenney <paulmck@kernel.org>  2020-09-09 21:36:34 -0700
committer  Paul E. McKenney <paulmck@kernel.org>  2020-09-16 16:32:36 -0700
commit     2393a613d2e3da35bd73ee55d9dca0fb04810955 (patch)
tree       6eafc01db60199ab348dca5766fd23f8ba07aafd /kernel/rcu
parent     rcu-tasks: Mark variables static (diff)
rcu-tasks: Use more aggressive polling for RCU Tasks Trace
The RCU Tasks Trace grace periods are too slow, as in 40x slower than
those of RCU Tasks.  This is due to my having assumed a one-second grace
period was OK, and thus not having optimized any further.  This commit
provides the first step in this optimization process, namely by allowing
the task_list scan backoff interval to be specified on a per-flavor basis,
and then speeding up the scans for RCU Tasks Trace.  However, kernels
built with CONFIG_TASKS_TRACE_RCU_READ_MB=y continue to use the old slower
backoff, consistent with that Kconfig option's goal of reducing IPIs.

Link: https://lore.kernel.org/bpf/CAADnVQK_AiX+S_L_A4CQWT11XyveppBbQSQgH_qWGyzu_E8Yeg@mail.gmail.com/
Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: <bpf@vger.kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
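For a rough sense of scale (arithmetic based on the hunks below, not wording
from the patch): the holdout-scan wait works out to HZ/fract jiffies, so the
old hard-coded fract = 10 gave every flavor an initial wait of HZ/10 jiffies,
i.e. 100 ms.  With init_fract = HZ/5, the first RCU Tasks Trace wait becomes
HZ/(HZ/5) = 5 jiffies, which is 5 ms at HZ=1000 and 50 ms at HZ=100, while
RCU Tasks (and RCU Tasks Trace with CONFIG_TASKS_TRACE_RCU_READ_MB=y) keeps
the 100 ms starting point.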
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tasks.h | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 978508ec39c1..ad8c4f3f44d2 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -28,6 +28,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
  * @gp_func: This flavor's grace-period-wait function.
  * @gp_state: Grace period's most recent state transition (debugging).
+ * @init_fract: Initial backoff sleep interval.
  * @gp_jiffies: Time of last @gp_state transition.
  * @gp_start: Most recent grace-period start in jiffies.
  * @n_gps: Number of grace periods completed since boot.
@@ -48,6 +49,7 @@ struct rcu_tasks {
 	struct wait_queue_head cbs_wq;
 	raw_spinlock_t cbs_lock;
 	int gp_state;
+	int init_fract;
 	unsigned long gp_jiffies;
 	unsigned long gp_start;
 	unsigned long n_gps;
@@ -329,8 +331,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
 	 */
 	lastreport = jiffies;
 
-	/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
-	fract = 10;
+	// Start off with initial wait and slowly back off to 1 HZ wait.
+	fract = rtp->init_fract;
+	if (fract > HZ)
+		fract = HZ;
 
 	for (;;) {
 		bool firstreport;
@@ -553,6 +557,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
 
 static int __init rcu_spawn_tasks_kthread(void)
 {
+	rcu_tasks.init_fract = 10;
 	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
 	rcu_tasks.pertask_func = rcu_tasks_pertask;
 	rcu_tasks.postscan_func = rcu_tasks_postscan;
@@ -1163,6 +1168,13 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
 
 static int __init rcu_spawn_tasks_trace_kthread(void)
 {
+	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
+		rcu_tasks_trace.init_fract = 10;
+	} else {
+		rcu_tasks_trace.init_fract = HZ / 5;
+		if (rcu_tasks_trace.init_fract <= 0)
+			rcu_tasks_trace.init_fract = 1;
+	}
 	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
 	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
 	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
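
To make the resulting backoff schedule concrete, here is a small userspace
sketch (not kernel code).  It assumes HZ = 1000 and a simple one-step
decrement of fract between scans as the backoff toward the one-second wait;
the helper name, the decrement step, and the printed output are illustrative
assumptions rather than code taken from tasks.h.

/*
 * Illustrative userspace sketch of the holdout-scan backoff, assuming
 * HZ = 1000 and a fract-- step between scans (the decrement is not
 * shown in the hunks above, so treat this as an approximation).
 */
#include <stdio.h>

#define HZ 1000

static void show_backoff(const char *flavor, int init_fract)
{
	int fract = init_fract;

	if (fract > HZ)
		fract = HZ;	/* Clamp so HZ / fract stays at least one jiffy. */

	printf("%-45s:", flavor);
	for (int scan = 1; scan <= 6; scan++) {
		printf(" %4d", HZ / fract);	/* Jiffies slept before this scan. */
		if (fract > 1)
			fract--;		/* Back off toward one-second waits. */
	}
	printf(" ... jiffies between scans\n");
}

int main(void)
{
	show_backoff("RCU Tasks (init_fract = 10)", 10);
	show_backoff("RCU Tasks Trace (init_fract = HZ / 5)", HZ / 5);
	show_backoff("RCU Tasks Trace, READ_MB=y (init_fract = 10)", 10);
	return 0;
}

With these settings the RCU Tasks schedule starts at 100 jiffies and grows
toward HZ, while the non-READ_MB RCU Tasks Trace schedule starts at 5
jiffies, which is the more aggressive polling the commit message describes.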