author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-05-21 14:43:08 +0200
committer	Ingo Molnar <mingo@elte.hu>	2010-06-09 11:12:37 +0200
commit	e78505958cf123048fb48cb56b79cebb8edd15fb (patch)
tree	3688d124cdc906cbe9f6587c8671ba0a14c95262 /arch/powerpc
parent	perf: Add perf_event::child_count (diff)
perf: Convert perf_event to local_t
Since now all modifications to event->count (and ->prev_count and ->period_left) are local to a cpu, change them to local64_t so we avoid the LOCK'ed ops.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
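For reference, here is the read-side pattern that the first hunk below converts, collected into one minimal sketch. The local64_* helpers and local64_t (from <asm/local64.h>) are the real kernel API this patch switches to; struct hw_state and read_pmc() are simplified stand-ins for just the fields this example touches, not the actual perf_event layout.

#include <asm/local64.h>

/* Simplified stand-ins: only the fields this sketch touches. */
struct hw_state {
	local64_t prev_count;	/* PMC value at the last update */
	local64_t period_left;	/* events left in the sample period */
	local64_t count;	/* total event count */
};

static u64 read_pmc(int idx);	/* assumed: returns the raw 32-bit PMC */

static void pmu_update(struct hw_state *hw, int idx)
{
	s64 prev, val, delta;

	/*
	 * Retry until prev_count advances to the freshly read PMC value
	 * without an interrupt racing us. Because the event is only ever
	 * touched on its own CPU, local64_cmpxchg() needs no LOCK prefix
	 * (or equivalent serialization) -- which is the point of the
	 * atomic64_t -> local64_t conversion.
	 */
	do {
		prev = local64_read(&hw->prev_count);
		barrier();
		val = read_pmc(idx);
	} while (local64_cmpxchg(&hw->prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide, so mask off the wrap. */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &hw->count);
	local64_sub(delta, &hw->period_left);
}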
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/perf_event.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index ac2a8c2554d9..af1d9a7c65d1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -410,15 +410,15 @@ static void power_pmu_read(struct perf_event *event)
* Therefore we treat them like NMIs.
*/
do {
- prev = atomic64_read(&event->hw.prev_count);
+ prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
- } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &event->count);
- atomic64_sub(delta, &event->hw.period_left);
+ local64_add(delta, &event->count);
+ local64_sub(delta, &event->hw.period_left);
}
/*
@@ -444,10 +444,10 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
if (!event->hw.idx)
continue;
val = (event->hw.idx == 5) ? pmc5 : pmc6;
- prev = atomic64_read(&event->hw.prev_count);
+ prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &event->count);
+ local64_add(delta, &event->count);
}
}
@@ -462,7 +462,7 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
- atomic64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
@@ -666,11 +666,11 @@ void hw_perf_enable(void)
}
val = 0;
if (event->hw.sample_period) {
- left = atomic64_read(&event->hw.period_left);
+ left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
- atomic64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
write_pmc(idx, val);
perf_event_update_userpage(event);
@@ -842,8 +842,8 @@ static void power_pmu_unthrottle(struct perf_event *event)
if (left < 0x80000000L)
val = 0x80000000L - left;
write_pmc(event->hw.idx, val);
- atomic64_set(&event->hw.prev_count, val);
- atomic64_set(&event->hw.period_left, left);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
perf_enable();
local_irq_restore(flags);
@@ -1109,7 +1109,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
event->hw.config = events[n];
event->hw.event_base = cflags[n];
event->hw.last_period = event->hw.sample_period;
- atomic64_set(&event->hw.period_left, event->hw.last_period);
+ local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
@@ -1147,16 +1147,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
int record = 0;
/* we don't have to worry about interrupts here */
- prev = atomic64_read(&event->hw.prev_count);
+ prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &event->count);
+ local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
- left = atomic64_read(&event->hw.period_left) - delta;
+ left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
@@ -1194,8 +1194,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
}
write_pmc(event->hw.idx, val);
- atomic64_set(&event->hw.prev_count, val);
- atomic64_set(&event->hw.period_left, left);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
}
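A note on the 0x80000000L constant recurring in the hunks above: the PowerPC PMCs raise a performance monitor interrupt when a counter's most significant bit becomes set, so seeding a counter with 0x80000000 - left makes it fire after "left" more events. A minimal sketch of that reload computation, with next_pmc_value() as a hypothetical helper name (the patched functions compute this inline):

/*
 * Compute the value to program into a 32-bit PMC so that it
 * overflows (MSB set -> interrupt) after "left" more events.
 * Periods of 2^31 or more cannot be expressed in a single
 * programming, so fall back to counting up from zero.
 */
static u64 next_pmc_value(s64 left)
{
	if (left < 0x80000000L)
		return 0x80000000L - left;
	return 0;
}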