author		Peter Zijlstra <peterz@infradead.org>	2013-11-27 13:54:38 +0000
committer	Ingo Molnar <mingo@kernel.org>	2013-12-17 15:21:33 +0100
commit		bad7192b842c83e580747ca57104dd51fe08c223 (patch)
tree		5a73fe2fc627384fa6ab621cc59433dc922f1155	/kernel/events/core.c
parent		perf/x86: enable Haswell Celeron RAPL support (diff)
perf: Fix PERF_EVENT_IOC_PERIOD to force-reset the period
Vince Weaver reports that, on all architectures apart from ARM,
PERF_EVENT_IOC_PERIOD doesn't actually update the period until the
next event fires. This is counter-intuitive behaviour and is better
dealt with in the core code.

This patch ensures that the period is forcefully reset when dealing
with such a request in the core code. A subsequent patch removes the
equivalent hack from the ARM back-end.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1385560479-11014-1-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
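For context, a minimal userspace sketch of the ioctl this commit fixes.
The event setup (a sampled CPU-cycles counter) and the period values are
illustrative assumptions, not part of the patch; only the
PERF_EVENT_IOC_PERIOD ioctl itself, which takes a pointer to a u64, is
taken from the perf API.

/* Minimal sketch (assumed setup, not from the patch): change the sample
 * period of a live event. Before this fix, on everything but ARM the new
 * period only took effect after the next counter overflow. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t new_period = 4000;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 1000000;		/* initial period */

	/* measure the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* With this patch the new period is programmed immediately;
	 * previously it waited for the next event to fire. */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &new_period))
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}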
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--	kernel/events/core.c	16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 403b781daafb..89d34f9bb8cb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3527,7 +3527,7 @@ static void perf_event_for_each(struct perf_event *event,
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	struct perf_event_context *ctx = event->ctx;
-	int ret = 0;
+	int ret = 0, active;
 	u64 value;
 
 	if (!is_sampling_event(event))
@@ -3551,6 +3551,20 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 		event->attr.sample_period = value;
 		event->hw.sample_period = value;
 	}
+
+	active = (event->state == PERF_EVENT_STATE_ACTIVE);
+	if (active) {
+		perf_pmu_disable(ctx->pmu);
+		event->pmu->stop(event, PERF_EF_UPDATE);
+	}
+
+	local64_set(&event->hw.period_left, 0);
+
+	if (active) {
+		event->pmu->start(event, PERF_EF_RELOAD);
+		perf_pmu_enable(ctx->pmu);
+	}
+
 unlock:
 	raw_spin_unlock_irq(&ctx->lock);
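Side note (not part of the commit page): the key line is
local64_set(&event->hw.period_left, 0). PMU back-ends reload the hardware
counter from the leftover count rather than from attr.sample_period, so a
stale period_left would keep the old period running until the next
overflow. Below is a stand-alone toy model of that reload decision; the
struct and function names are hypothetical, chosen only to mirror the
fields the patch touches.

/* Stand-alone model (hypothetical, not kernel code) of why period_left
 * must be cleared: the driver reloads the counter from the leftover
 * count, so a stale leftover keeps the old period alive. */
#include <stdint.h>
#include <stdio.h>

struct hw_event_model {
	int64_t sample_period;	/* models event->hw.sample_period */
	int64_t period_left;	/* models event->hw.period_left   */
};

/* Models the reload step a driver performs on start(PERF_EF_RELOAD). */
static int64_t reload(struct hw_event_model *hwc)
{
	if (hwc->period_left <= 0)	/* nothing left: take a fresh period */
		hwc->period_left = hwc->sample_period;
	return hwc->period_left;	/* count programmed into the counter */
}

int main(void)
{
	struct hw_event_model hwc = { .sample_period = 1000000,
				      .period_left  = 750000 };

	hwc.sample_period = 4000;	/* PERF_EVENT_IOC_PERIOD stores the new period */

	/* prints 750000: old period still governs the next overflow */
	printf("without reset: %lld\n", (long long)reload(&hwc));

	hwc.period_left = 0;		/* what this patch adds: local64_set(..., 0) */

	/* prints 4000: new period takes effect immediately */
	printf("with reset:    %lld\n", (long long)reload(&hwc));
	return 0;
}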