author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-17 18:47:11 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-09-17 22:08:25 +0200
commit	850bc73ffcc99cddfb52bc23217c60810c508853 (patch)
tree	787a863ccb2aaa1de48a6690b33026beadecce20 /kernel/perf_counter.c
parent	perf sched: Add 'perf sched map' scheduling event map printout (diff)
perf_counter: Do not throttle single swcounter events
We can have swcounter events that contribute more than a single count
per event; when used with a non-zero period, those can generate
multiple events, which is when we need throttling.

However, swcounters that contribute only a single count per event can
only come as fast as we can run code, hence don't throttle them.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
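(To make the multi-count case concrete, here is a minimal userspace sketch; sw_period_overflows() and its typedefs are hypothetical stand-ins for the kernel's swcounter period bookkeeping, not the actual implementation.)

	#include <stdint.h>

	typedef int64_t  s64;
	typedef uint64_t u64;

	/*
	 * Hypothetical stand-in: one event adds 'nr' counts, and every
	 * sample period boundary crossed yields one overflow.  With
	 * nr == 1 at most one overflow can result, so throttling such
	 * events is pointless -- they arrive no faster than code runs.
	 */
	static u64 sw_period_overflows(s64 *period_left, u64 nr, u64 period)
	{
		u64 overflow = 0;

		*period_left -= nr;
		while (*period_left <= 0) {
			*period_left += period;
			overflow++;	/* > 1 when 'nr' spans several periods */
		}
		return overflow;
	}

	int main(void)
	{
		s64 left = 4;	/* counts left until the next period boundary */

		/* one event contributing 10 counts with period 4 crosses
		 * two boundaries, i.e. generates two overflow events */
		return sw_period_overflows(&left, 10, 4) == 2 ? 0 : 1;
	}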
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 667ab25ad3d5..fe0d1adde804 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3494,14 +3494,15 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
  * Generic counter overflow handling, sampling.
  */
 
-int perf_counter_overflow(struct perf_counter *counter, int nmi,
-			  struct perf_sample_data *data)
+static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
+				   int throttle, struct perf_sample_data *data)
 {
 	int events = atomic_read(&counter->event_limit);
-	int throttle = counter->pmu->unthrottle != NULL;
 	struct hw_perf_counter *hwc = &counter->hw;
 	int ret = 0;
 
+	throttle = (throttle && counter->pmu->unthrottle != NULL);
+
 	if (!throttle) {
 		hwc->interrupts++;
 	} else {
@@ -3554,6 +3555,12 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
 	return ret;
 }
 
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+			  struct perf_sample_data *data)
+{
+	return __perf_counter_overflow(counter, nmi, 1, data);
+}
+
 /*
  * Generic software counter infrastructure
  */
@@ -3592,6 +3599,7 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct perf_sample_data *data)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
+	int throttle = 0;
 	u64 overflow;
 
 	data->period = counter->hw.last_period;
@@ -3601,13 +3609,14 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 		return;
 
 	for (; overflow; overflow--) {
-		if (perf_counter_overflow(counter, nmi, data)) {
+		if (__perf_counter_overflow(counter, nmi, throttle, data)) {
 			/*
 			 * We inhibit the overflow from happening when
 			 * hwc->interrupts == MAX_INTERRUPTS.
 			 */
 			break;
 		}
+		throttle = 1;
 	}
 }
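(Behavioural summary, as an illustrative sketch only: dummy_overflow() below is a stand-in for __perf_counter_overflow(), pretending the counter is already past its throttle limit. The hardware interrupt path keeps full throttling by passing throttle = 1 through the perf_counter_overflow() wrapper, while the swcounter loop passes 0 for the first overflow of an event and 1 for any further ones, so single-count swcounter events are never throttled.)

	#include <stdio.h>

	/* stand-in for __perf_counter_overflow(): nonzero return means
	 * the counter was throttled and delivery must stop */
	static int dummy_overflow(int throttle)
	{
		return throttle;
	}

	int main(void)
	{
		unsigned long long overflow = 3; /* one event, 3 periods crossed */
		int throttle = 0;		 /* first overflow is exempt */
		unsigned int delivered = 0;

		for (; overflow; overflow--) {
			if (dummy_overflow(throttle))
				break;		/* throttled: stop delivering */
			delivered++;
			throttle = 1;		/* later overflows may throttle */
		}

		printf("delivered %u of 3 overflows\n", delivered); /* prints 1 */
		return 0;
	}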