Diffstat (limited to 'arch/powerpc/perf/core-book3s.c')
 arch/powerpc/perf/core-book3s.c | 93 ++++++++++++++++++++++++++-----------
 1 file changed, 68 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4520c9356b54..b7cd00b0171e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -36,7 +36,12 @@ struct cpu_hw_events {
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
- unsigned long mmcr[3];
+ /*
+ * The order of the MMCR array is:
+ * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
+ * - 32-bit, MMCR0, MMCR1, MMCR2
+ */
+ unsigned long mmcr[4];
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
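
The patch keeps plain integer indexing into mmcr[], so the slot-to-register mapping is easy to lose track of. A hypothetical set of index names (not part of the patch) restating the comment above, valid for the 64-bit layout only:

enum {
	IDX_MMCR0 = 0,	/* slot written to SPRN_MMCR0 */
	IDX_MMCR1 = 1,	/* slot written to SPRN_MMCR1 */
	IDX_MMCRA = 2,	/* 64-bit layout: slot written to SPRN_MMCRA */
	IDX_MMCR2 = 3,	/* 64-bit layout: slot written to SPRN_MMCR2 */
};

On 32-bit the third slot holds MMCR2 instead, so these names would not apply there.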
@@ -112,9 +117,9 @@ static bool is_ebb_event(struct perf_event *event) { return false; }
static int ebb_event_check(struct perf_event *event) { return 0; }
static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
- return mmcr0;
+ return cpuhw->mmcr[0];
}
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
@@ -485,7 +490,7 @@ static bool is_ebb_event(struct perf_event *event)
* check that the PMU supports EBB, meaning those that don't can still
* use bit 63 of the event code for something else if they wish.
*/
- return (ppmu->flags & PPMU_EBB) &&
+ return (ppmu->flags & PPMU_ARCH_207S) &&
((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}
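
For context, an EBB event is requested from userspace by setting bit 63 of the raw event code, which is what the PERF_EVENT_CONFIG_EBB_SHIFT extraction above tests. A minimal sketch of the attribute setup; the raw event code 0x1001e is an assumed placeholder:

#include <linux/perf_event.h>
#include <stdint.h>

static void setup_ebb_event(struct perf_event_attr *attr)
{
	/* Assumed raw event code; bit 63 marks the event as EBB. */
	attr->type = PERF_TYPE_RAW;
	attr->config = 0x1001eULL | (1ULL << 63);
	attr->exclude_kernel = 1;	/* EBB events count user state only */
	attr->exclude_hv = 1;
	attr->pinned = 1;		/* EBB events must be pinned ... */
	attr->exclusive = 1;		/* ... and exclusive */
}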
@@ -542,8 +547,10 @@ static void ebb_switch_out(unsigned long mmcr0)
current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
}
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
+ unsigned long mmcr0 = cpuhw->mmcr[0];
+
if (!ebb)
goto out;
@@ -568,7 +575,15 @@ static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
mtspr(SPRN_SIAR, current->thread.siar);
mtspr(SPRN_SIER, current->thread.sier);
mtspr(SPRN_SDAR, current->thread.sdar);
- mtspr(SPRN_MMCR2, current->thread.mmcr2);
+
+ /*
+ * Merge the kernel & user values of MMCR2. The semantics we implement
+ * are that the user MMCR2 can set bits, ie. cause counters to freeze,
+ * but not clear bits. If a task wants to be able to clear bits, ie.
+ * unfreeze counters, it should not set exclude_xxx in its events and
+ * instead manage the MMCR2 entirely by itself.
+ */
+ mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
out:
return mmcr0;
}
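
The OR above works because MMCR2's freeze bits are active when set: merging can only add freeze conditions on top of what the kernel programmed, never remove them. A standalone sketch of that property, with made-up bit values:

#include <assert.h>

int main(void)
{
	unsigned long kernel_mmcr2 = 0x10UL;	/* assumed: kernel freezes one counter */
	unsigned long user_mmcr2   = 0x200UL;	/* assumed: task freezes another */
	unsigned long merged = kernel_mmcr2 | user_mmcr2;

	/* Every freeze bit the kernel set survives the merge... */
	assert((merged & kernel_mmcr2) == kernel_mmcr2);
	/* ...and the user's extra freeze bit is honoured too. */
	assert(merged & user_mmcr2);
	return 0;
}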
@@ -777,7 +792,7 @@ void perf_event_print_debug(void)
if (ppmu->flags & PPMU_HAS_SIER)
sier = mfspr(SPRN_SIER);
- if (ppmu->flags & PPMU_EBB) {
+ if (ppmu->flags & PPMU_ARCH_207S) {
pr_info("MMCR2: %016lx EBBHR: %016lx\n",
mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -915,6 +930,14 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int i, n, first;
struct perf_event *event;
+ /*
+ * If the PMU we're on supports per event exclude settings then we
+ * don't need to do any of this logic. NB. This assumes no PMU has both
+ * per event exclude and limited PMCs.
+ */
+ if (ppmu->flags & PPMU_ARCH_207S)
+ return 0;
+
n = n_prev + n_new;
if (n <= 1)
return 0;
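
The logic being skipped enforces that, on PMUs without per-event exclude support, all grouped events agree on their exclude_* settings, since they share a single set of MMCR0 freeze bits. A simplified restatement of that constraint, using hypothetical types rather than the kernel's:

#include <stdbool.h>

struct ev { bool exclude_user, exclude_kernel, exclude_hv; };

static bool excludes_consistent(const struct ev *evs, int n)
{
	/* On older PMUs every event must match the first event's
	 * exclude bits, because they map to shared MMCR0 controls. */
	for (int i = 1; i < n; i++) {
		if (evs[i].exclude_user   != evs[0].exclude_user ||
		    evs[i].exclude_kernel != evs[0].exclude_kernel ||
		    evs[i].exclude_hv     != evs[0].exclude_hv)
			return false;
	}
	return true;
}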
@@ -996,7 +1019,22 @@ static void power_pmu_read(struct perf_event *event)
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
- local64_sub(delta, &event->hw.period_left);
+
+ /*
+ * A number of places program the PMC with (0x80000000 - period_left).
+ * We never want period_left to be less than 1 because we will program
+ * the PMC with a value >= 0x80000000 and an edge-detected PMC will
+ * roll around to 0 before taking an exception. We have seen this
+ * on POWER8.
+ *
+ * To fix this, clamp the minimum value of period_left to 1.
+ */
+ do {
+ prev = local64_read(&event->hw.period_left);
+ val = prev - delta;
+ if (val < 1)
+ val = 1;
+ } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}
/*
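
A worked example of the failure the clamp prevents: with period_left at or below 0, the value programmed into the 32-bit PMC, 0x80000000 - period_left, starts at or above 0x80000000, so an edge-detected counter wraps to 0 before the overflow exception fires. A userspace sketch with assumed numbers:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the cmpxchg loop's clamp: period_left never drops below 1. */
static int64_t clamp_period_left(int64_t prev, int64_t delta)
{
	int64_t val = prev - delta;
	return val < 1 ? 1 : val;
}

int main(void)
{
	/* Assumed: a read whose delta overshoots the remaining period. */
	int64_t left = clamp_period_left(100, 250);	/* clamps to 1 */
	uint32_t pmc = 0x80000000u - (uint32_t)left;	/* 0x7fffffff */

	/* Unclamped, left would be -150 and the PMC would start at
	 * 0x80000096, already past the overflow threshold. */
	printf("period_left=%lld pmc=0x%08x\n", (long long)left, pmc);
	return 0;
}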
@@ -1204,28 +1242,31 @@ static void power_pmu_enable(struct pmu *pmu)
}
/*
- * Compute MMCR* values for the new set of events
+ * Clear all MMCR settings and recompute them for the new set of events.
*/
+ memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
+
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
- cpuhw->mmcr)) {
+ cpuhw->mmcr, cpuhw->event)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
}
- /*
- * Add in MMCR0 freeze bits corresponding to the
- * attr.exclude_* bits for the first event.
- * We have already checked that all events have the
- * same values for these bits as the first event.
- */
- event = cpuhw->event[0];
- if (event->attr.exclude_user)
- cpuhw->mmcr[0] |= MMCR0_FCP;
- if (event->attr.exclude_kernel)
- cpuhw->mmcr[0] |= freeze_events_kernel;
- if (event->attr.exclude_hv)
- cpuhw->mmcr[0] |= MMCR0_FCHV;
+ if (!(ppmu->flags & PPMU_ARCH_207S)) {
+ /*
+ * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
+ * bits for the first event. We have already checked that all
+ * events have the same value for these bits as the first event.
+ */
+ event = cpuhw->event[0];
+ if (event->attr.exclude_user)
+ cpuhw->mmcr[0] |= MMCR0_FCP;
+ if (event->attr.exclude_kernel)
+ cpuhw->mmcr[0] |= freeze_events_kernel;
+ if (event->attr.exclude_hv)
+ cpuhw->mmcr[0] |= MMCR0_FCHV;
+ }
/*
* Write the new configuration to MMCR* with the freeze
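
For pre-2.07 PMUs the group-wide exclude_* attributes still collapse onto global MMCR0 freeze controls, as the block above shows. A compact restatement; the bit values here are placeholders standing in for the real reg.h definitions:

#include <stdbool.h>

#define MMCR0_FCP  0x20000000UL	/* placeholder: freeze in problem state */
#define MMCR0_FCS  0x10000000UL	/* placeholder: freeze in supervisor state */
#define MMCR0_FCHV 0x00000001UL	/* placeholder: freeze in hypervisor state */

static unsigned long legacy_freeze_bits(bool excl_user, bool excl_kernel,
					bool excl_hv,
					unsigned long freeze_events_kernel)
{
	unsigned long mmcr0 = 0;

	if (excl_user)
		mmcr0 |= MMCR0_FCP;
	if (excl_kernel)
		mmcr0 |= freeze_events_kernel;	/* MMCR0_FCS, or FCHV on HV */
	if (excl_hv)
		mmcr0 |= MMCR0_FCHV;
	return mmcr0;
}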
@@ -1237,6 +1278,8 @@ static void power_pmu_enable(struct pmu *pmu)
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
+ if (ppmu->flags & PPMU_ARCH_207S)
+ mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);
/*
* Read off any pre-existing events that need to move
@@ -1292,7 +1335,7 @@ static void power_pmu_enable(struct pmu *pmu)
out_enable:
pmao_restore_workaround(ebb);
- mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+ mmcr0 = ebb_switch_in(ebb, cpuhw);
mb();
if (cpuhw->bhrb_users)
@@ -1696,7 +1739,7 @@ static int power_pmu_event_init(struct perf_event *event)
if (has_branch_stack(event)) {
/* PMU has BHRB enabled */
- if (!(ppmu->flags & PPMU_BHRB))
+ if (!(ppmu->flags & PPMU_ARCH_207S))
return -EOPNOTSUPP;
}
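
Finally, branch-stack sampling (backed by the BHRB) is now keyed off the same PPMU_ARCH_207S flag. A hypothetical sketch of the userspace configuration that trips this check:

#include <linux/perf_event.h>

/* An attr configured this way makes has_branch_stack() true, so on a
 * PMU without PPMU_ARCH_207S the event now fails with EOPNOTSUPP. */
static void request_branch_stack(struct perf_event_attr *attr)
{
	attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
	attr->branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
}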