author     Paul E. McKenney <paulmck@kernel.org>  2020-05-26 12:34:57 -0700
committer  Paul E. McKenney <paulmck@kernel.org>  2020-06-29 12:00:45 -0700
commit     2db0bda38453f472640f4ece1e2a495cbd44f892
tree       c7a3b1ee3080df7c0d64b520b343653c4acaf250 /kernel/rcu
parent     refperf: More closely synchronize reader start times
refperf: Add warmup and cooldown processing phases
This commit causes all the readers to start out running an unmeasured load
until every reader has completed at least one such run (thus having warmed
up), then run the measured load, and finally run the unmeasured load again
until all readers have completed their measured load.  This approach avoids
any reader running its measured load while other readers are idle.

Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
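For illustration, here is a minimal userspace sketch of the warmup/cooldown
check-in pattern this commit adds, using C11 atomics and POSIX threads.
Everything in it (reader(), unmeasured_pass(), NREADERS, and the separate
fetch-sub and acquire load) is illustrative rather than taken from refperf.c,
which combines the decrement and test via atomic_dec_return() and uses
cur_ops->readsection(loops) as its unmeasured load.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NREADERS 4
#define WARMUP_LOOPS 10000

static atomic_int n_warmedup = NREADERS;
static atomic_int n_cooleddown = NREADERS;

// Stand-in for the unmeasured load (cur_ops->readsection(loops) in
// refperf.c): exercise the same code path, discard the timing.
static void unmeasured_pass(void)
{
	for (volatile int i = 0; i < WARMUP_LOOPS; i++)
		;
}

static void *reader(void *arg)
{
	long me = (long)arg;

	// Warmup: run once, check in, then keep running the unmeasured
	// load until every reader has checked in.
	unmeasured_pass();
	atomic_fetch_sub(&n_warmedup, 1);
	while (atomic_load_explicit(&n_warmedup, memory_order_acquire))
		unmeasured_pass();

	// The measured load would run here, with all readers active.
	printf("reader %ld: running measured load\n", me);

	// Cooldown: check in, then keep the load steady until every
	// reader has finished its measured load.
	atomic_fetch_sub(&n_cooleddown, 1);
	while (atomic_load_explicit(&n_cooleddown, memory_order_acquire))
		unmeasured_pass();

	return NULL;
}

int main(void)
{
	pthread_t tid[NREADERS];

	for (long i = 0; i < NREADERS; i++)
		pthread_create(&tid[i], NULL, reader, (void *)i);
	for (int i = 0; i < NREADERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Compile with, e.g., cc -pthread.  Because each reader keeps the CPU busy with
the same work before and after its measured phase, no reader measures while
others are idle or still starting up, which is exactly the skew the commit
message describes.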
Diffstat (limited to 'kernel/rcu')
 kernel/rcu/refperf.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/kernel/rcu/refperf.c b/kernel/rcu/refperf.c
index 234bb0e84a8b..445190b97b05 100644
--- a/kernel/rcu/refperf.c
+++ b/kernel/rcu/refperf.c
@@ -100,6 +100,8 @@ static atomic_t nreaders_exp;
 // Use to wait for all threads to start.
 static atomic_t n_init;
 static atomic_t n_started;
+static atomic_t n_warmedup;
+static atomic_t n_cooleddown;
 
 // Track which experiment is currently running.
 static int exp_idx;
@@ -260,8 +262,15 @@ repeat:
 	VERBOSE_PERFOUT("ref_perf_reader %ld: experiment %d started", me, exp_idx);
 
-	// To prevent noise, keep interrupts disabled. This also has the
-	// effect of preventing entries into slow path for rcu_read_unlock().
+
+	// To reduce noise, do an initial cache-warming invocation, check
+	// in, and then keep warming until everyone has checked in.
+	cur_ops->readsection(loops);
+	if (!atomic_dec_return(&n_warmedup))
+		while (atomic_read_acquire(&n_warmedup))
+			cur_ops->readsection(loops);
+	// Also keep interrupts disabled. This also has the effect
+	// of preventing entries into slow path for rcu_read_unlock().
 	local_irq_save(flags);
 	start = ktime_get_mono_fast_ns();
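(A note on the idiom: atomic_dec_return() returns the post-decrement value,
so it reaches zero for the last reader to check in, and atomic_read_acquire()
is a load with acquire ordering, so a reader cannot proceed past the spin on
the basis of a stale view of the check-in count.)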
@@ -271,6 +280,11 @@ repeat:
 	local_irq_restore(flags);
 	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
 
+	// To reduce runtime-skew noise, do maintain-load invocations until
+	// everyone is done.
+	if (!atomic_dec_return(&n_cooleddown))
+		while (atomic_read_acquire(&n_cooleddown))
+			cur_ops->readsection(loops);
 	if (atomic_dec_and_test(&nreaders_exp))
 		wake_up(&main_wq);
@@ -372,6 +386,8 @@ static int main_func(void *arg)
 		reset_readers();
 		atomic_set(&nreaders_exp, nreaders);
 		atomic_set(&n_started, nreaders);
+		atomic_set(&n_warmedup, nreaders);
+		atomic_set(&n_cooleddown, nreaders);
 
 		exp_idx = exp;
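Note that main_func() re-arms both counters before each experiment, so the
warmup and cooldown handshakes repeat for every run; the userspace sketch
above initializes its counters only once because it performs a single
experiment.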