author		leilei.lin <leilei.lin@alibaba-inc.com>	2017-08-09 08:29:21 +0800
committer	Ingo Molnar <mingo@kernel.org>	2017-08-10 12:08:40 +0200
commit		fdccc3fb7a42ea4e4cd77d2fb8fa3a45c66ec0bf (patch)
tree		0bc643ab390edab5ded23a9fd381f058425a8438	/kernel/events/core.c
parent		perf/x86/amd/uncore: Get correct number of cores sharing last level cache (diff)
download	linux-dev-fdccc3fb7a42ea4e4cd77d2fb8fa3a45c66ec0bf.tar.xz
		linux-dev-fdccc3fb7a42ea4e4cd77d2fb8fa3a45c66ec0bf.zip
perf/core: Reduce context switch overhead
Skip most of the PMU context switching overhead when ctx->nr_events is 0.

50% performance overhead was observed under an extreme testcase.

Signed-off-by: leilei.lin <leilei.lin@alibaba-inc.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@kernel.org
Cc: alexander.shishkin@linux.intel.com
Cc: eranian@gmail.com
Cc: jolsa@redhat.com
Cc: linxiulei@gmail.com
Cc: yang_oliver@hotmail.com
Link: http://lkml.kernel.org/r/20170809002921.69813-1-leilei.lin@alibaba-inc.com
[ Rewrote the changelog. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
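For context, the shape of perf_event_context_sched_in() after this patch is roughly the following. This is a simplified sketch: only the lines marked '+' in the diff below are introduced by this patch; the rest is approximated from the surrounding kernel code of that time and slightly condensed, so refer to the actual hunks for the authoritative change.

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* Already the active task context on this CPU: nothing to do. */
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);

	/*
	 * New fast path: an empty context needs no PMU reprogramming.
	 * The check is done under ctx->lock to serialize against
	 * perf_install_in_context().
	 */
	if (!ctx->nr_events)
		goto unlock;

	perf_pmu_disable(ctx->pmu);
	/* Kick CPU flexible events out so the task's events can go in. */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	perf_event_sched_in(cpuctx, ctx, task);
	perf_pmu_enable(ctx->pmu);

unlock:
	perf_ctx_unlock(cpuctx, ctx);
}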
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--	kernel/events/core.c	9
1 file changed, 9 insertions, 0 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ee20d4c546b5..d704e23914bf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3211,6 +3211,13 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 		return;
 
 	perf_ctx_lock(cpuctx, ctx);
+	/*
+	 * We must check ctx->nr_events while holding ctx->lock, such
+	 * that we serialize against perf_install_in_context().
+	 */
+	if (!ctx->nr_events)
+		goto unlock;
+
 	perf_pmu_disable(ctx->pmu);
 	/*
 	 * We want to keep the following priority order:
@@ -3224,6 +3231,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	perf_event_sched_in(cpuctx, ctx, task);
 	perf_pmu_enable(ctx->pmu);
+
+unlock:
 	perf_ctx_unlock(cpuctx, ctx);
 }
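As the new comment in the hunk notes, the ctx->nr_events check is deliberately placed after perf_ctx_lock() rather than before it. The install path (perf_install_in_context(), via its cross-CPU callback) adds a new event while holding the same ctx->lock, so checking under the lock guarantees that a concurrent install is either fully visible here (nr_events is non-zero and the event gets scheduled in) or happens entirely afterwards, in which case the install path itself takes care of scheduling the event. An unlocked check could race with the install and leave a freshly added event unscheduled until the next context switch.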