Diffstat:
 drivers/perf/arm_pmuv3.c (renamed from arch/arm64/kernel/perf_event.c) | 158
 1 file changed, 55 insertions(+), 103 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/drivers/perf/arm_pmuv3.c
index dde06c0f97f3..c98e4039386d 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -10,20 +10,21 @@
#include <asm/irq_regs.h>
#include <asm/perf_event.h>
-#include <asm/sysreg.h>
#include <asm/virt.h>
#include <clocksource/arm_arch_timer.h>
#include <linux/acpi.h>
#include <linux/clocksource.h>
-#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
+#include <asm/arm_pmuv3.h>
+
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -45,7 +46,6 @@ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
@@ -387,10 +387,13 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
* We unconditionally enable ARMv8.5-PMU long event counter support
* (64-bit events) where supported. Indicate if this arm_pmu has long
* event counter support.
+ *
+ * On AArch32, long counters make no sense (you can't access the top
+ * bits), so we only enable this on AArch64.
*/
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
- return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
+ return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
}
static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
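
Note: is_pmuv3p5() comes from each architecture's <asm/arm_pmuv3.h> rather
than open-coding the ID register comparison here. A minimal sketch of the
shape the AArch64 helper is assumed to take:

	/* Assumed shape; the real definition lives in arch/arm64/include/asm/arm_pmuv3.h. */
	static inline bool is_pmuv3p5(int pmuver)
	{
		/* PMUVer field values are ordered, so >= compares feature levels. */
		return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
	}
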
@@ -424,83 +427,16 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
#define ARMV8_IDX_TO_COUNTER(x) \
(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-/*
- * This code is really good
- */
-
-#define PMEVN_CASE(n, case_macro) \
- case n: case_macro(n); break
-
-#define PMEVN_SWITCH(x, case_macro) \
- do { \
- switch (x) { \
- PMEVN_CASE(0, case_macro); \
- PMEVN_CASE(1, case_macro); \
- PMEVN_CASE(2, case_macro); \
- PMEVN_CASE(3, case_macro); \
- PMEVN_CASE(4, case_macro); \
- PMEVN_CASE(5, case_macro); \
- PMEVN_CASE(6, case_macro); \
- PMEVN_CASE(7, case_macro); \
- PMEVN_CASE(8, case_macro); \
- PMEVN_CASE(9, case_macro); \
- PMEVN_CASE(10, case_macro); \
- PMEVN_CASE(11, case_macro); \
- PMEVN_CASE(12, case_macro); \
- PMEVN_CASE(13, case_macro); \
- PMEVN_CASE(14, case_macro); \
- PMEVN_CASE(15, case_macro); \
- PMEVN_CASE(16, case_macro); \
- PMEVN_CASE(17, case_macro); \
- PMEVN_CASE(18, case_macro); \
- PMEVN_CASE(19, case_macro); \
- PMEVN_CASE(20, case_macro); \
- PMEVN_CASE(21, case_macro); \
- PMEVN_CASE(22, case_macro); \
- PMEVN_CASE(23, case_macro); \
- PMEVN_CASE(24, case_macro); \
- PMEVN_CASE(25, case_macro); \
- PMEVN_CASE(26, case_macro); \
- PMEVN_CASE(27, case_macro); \
- PMEVN_CASE(28, case_macro); \
- PMEVN_CASE(29, case_macro); \
- PMEVN_CASE(30, case_macro); \
- default: WARN(1, "Invalid PMEV* index\n"); \
- } \
- } while (0)
-
-#define RETURN_READ_PMEVCNTRN(n) \
- return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
- PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
- return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
- write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
-}
-
-#define WRITE_PMEVTYPERN(n) \
- write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
-}
-
static inline u32 armv8pmu_pmcr_read(void)
{
- return read_sysreg(pmcr_el0);
+ return read_pmcr();
}
static inline void armv8pmu_pmcr_write(u32 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
- write_sysreg(val, pmcr_el0);
+ write_pmcr(val);
}
static inline int armv8pmu_has_overflowed(u32 pmovsr)
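
Note: read_pmcr()/write_pmcr() and the other read_*/write_* helpers below
replace direct sysreg accesses so the same driver body builds on both
architectures. A sketch of the two assumed per-arch definitions, with the
AArch64 side wrapping read_sysreg() and the AArch32 side a CP15 accessor:

	/* arch/arm64/include/asm/arm_pmuv3.h (assumed shape): */
	static inline u32 read_pmcr(void)
	{
		return read_sysreg(pmcr_el0);
	}

	/*
	 * arch/arm/include/asm/arm_pmuv3.h (assumed shape; the CP15
	 * encoding c9/0/c12/0 is PMCR in the ARMv8 AArch32 mapping):
	 */
	#define PMCR	__ACCESS_CP15(c9, 0, c12, 0)

	static inline u32 read_pmcr(void)
	{
		return read_sysreg(PMCR);
	}
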
@@ -555,7 +491,7 @@ static bool armv8pmu_event_needs_bias(struct perf_event *event)
static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
if (armv8pmu_event_needs_bias(event))
- value |= GENMASK(63, 32);
+ value |= GENMASK_ULL(63, 32);
return value;
}
@@ -563,7 +499,7 @@ static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
if (armv8pmu_event_needs_bias(event))
- value &= ~GENMASK(63, 32);
+ value &= ~GENMASK_ULL(63, 32);
return value;
}
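
Note: the switch to GENMASK_ULL() is what makes these helpers safe on a
32-bit build. GENMASK() produces an unsigned long, which is 32 bits wide on
arm, so GENMASK(63, 32) would shift past the width of the type:

	/* GENMASK(63, 32): undefined on 32-bit (shift count >= type width). */
	/* GENMASK_ULL(63, 32) == 0xffffffff00000000ULL on any build.        */
	u64 bias = GENMASK_ULL(63, 32);
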
@@ -575,7 +511,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
u64 value;
if (idx == ARMV8_IDX_CYCLE_COUNTER)
- value = read_sysreg(pmccntr_el0);
+ value = read_pmccntr();
else
value = armv8pmu_read_hw_counter(event);
@@ -610,7 +546,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
value = armv8pmu_bias_long_counter(event, value);
if (idx == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(value, pmccntr_el0);
+ write_pmccntr(value);
else
armv8pmu_write_hw_counter(event, value);
}
@@ -641,7 +577,7 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
armv8pmu_write_evtype(idx, chain_evt);
} else {
if (idx == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(hwc->config_base, pmccfiltr_el0);
+ write_pmccfiltr(hwc->config_base);
else
armv8pmu_write_evtype(idx, hwc->config_base);
}
@@ -664,7 +600,7 @@ static inline void armv8pmu_enable_counter(u32 mask)
* enable the counter.
*/
isb();
- write_sysreg(mask, pmcntenset_el0);
+ write_pmcntenset(mask);
}
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
@@ -681,7 +617,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
static inline void armv8pmu_disable_counter(u32 mask)
{
- write_sysreg(mask, pmcntenclr_el0);
+ write_pmcntenclr(mask);
/*
* Make sure the effects of disabling the counter are visible before we
* start configuring the event.
@@ -703,7 +639,7 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
static inline void armv8pmu_enable_intens(u32 mask)
{
- write_sysreg(mask, pmintenset_el1);
+ write_pmintenset(mask);
}
static inline void armv8pmu_enable_event_irq(struct perf_event *event)
@@ -714,10 +650,10 @@ static inline void armv8pmu_enable_event_irq(struct perf_event *event)
static inline void armv8pmu_disable_intens(u32 mask)
{
- write_sysreg(mask, pmintenclr_el1);
+ write_pmintenclr(mask);
isb();
/* Clear the overflow flag in case an interrupt is pending. */
- write_sysreg(mask, pmovsclr_el0);
+ write_pmovsclr(mask);
isb();
}
@@ -732,18 +668,18 @@ static inline u32 armv8pmu_getreset_flags(void)
u32 value;
/* Read */
- value = read_sysreg(pmovsclr_el0);
+ value = read_pmovsclr();
/* Write to clear flags */
value &= ARMV8_PMU_OVSR_MASK;
- write_sysreg(value, pmovsclr_el0);
+ write_pmovsclr(value);
return value;
}
static void armv8pmu_disable_user_access(void)
{
- write_sysreg(0, pmuserenr_el0);
+ write_pmuserenr(0);
}
static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
@@ -754,13 +690,13 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
/* Clear any unused counters to avoid leaking their contents */
for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
if (i == ARMV8_IDX_CYCLE_COUNTER)
- write_sysreg(0, pmccntr_el0);
+ write_pmccntr(0);
else
armv8pmu_write_evcntr(i, 0);
}
- write_sysreg(0, pmuserenr_el0);
- write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
+ write_pmuserenr(0);
+ write_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
}
static void armv8pmu_enable_event(struct perf_event *event)
@@ -1048,6 +984,28 @@ static void armv8pmu_reset(void *info)
armv8pmu_pmcr_write(pmcr);
}
+static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu,
+ struct perf_event *event)
+{
+ if (event->attr.type == PERF_TYPE_HARDWARE &&
+ event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) {
+
+ if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
+ armpmu->pmceid_bitmap))
+ return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
+
+ if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED,
+ armpmu->pmceid_bitmap))
+ return ARMV8_PMUV3_PERFCTR_BR_RETIRED;
+
+ return HW_OP_UNSUPPORTED;
+ }
+
+ return armpmu_map_event(event, &armv8_pmuv3_perf_map,
+ &armv8_pmuv3_perf_cache_map,
+ ARMV8_PMU_EVTYPE_EVENT);
+}
+
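
Note: the effect of the new mapping, sketched with the architectural event
numbers (assuming PC_WRITE_RETIRED is 0x000C and BR_RETIRED is 0x0021):

	/*
	 * Hypothetical PMU: PMCEID advertises BR_RETIRED (0x0021) but not
	 * PC_WRITE_RETIRED (0x000C). PERF_COUNT_HW_BRANCH_INSTRUCTIONS now
	 * resolves to 0x0021 instead of programming an event the hardware
	 * does not implement.
	 */
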
static int __armv8_pmuv3_map_event(struct perf_event *event,
const unsigned (*extra_event_map)
[PERF_COUNT_HW_MAX],
@@ -1059,9 +1017,7 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
int hw_event_id;
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
- &armv8_pmuv3_perf_cache_map,
- ARMV8_PMU_EVTYPE_EVENT);
+ hw_event_id = __armv8_pmuv3_map_event_id(armpmu, event);
/*
* CHAIN events only work when paired with an adjacent counter, and it
@@ -1144,16 +1100,12 @@ static void __armv8pmu_probe_pmu(void *info)
{
struct armv8pmu_probe_info *probe = info;
struct arm_pmu *cpu_pmu = probe->pmu;
- u64 dfr0;
u64 pmceid_raw[2];
u32 pmceid[2];
int pmuver;
- dfr0 = read_sysreg(id_aa64dfr0_el1);
- pmuver = cpuid_feature_extract_unsigned_field(dfr0,
- ID_AA64DFR0_EL1_PMUVer_SHIFT);
- if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
- pmuver == ID_AA64DFR0_EL1_PMUVer_NI)
+ pmuver = read_pmuver();
+ if (!pmuv3_implemented(pmuver))
return;
cpu_pmu->pmuver = pmuver;
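
Note: read_pmuver() and pmuv3_implemented() also come from the per-arch
header. A sketch assuming the AArch64 versions simply fold the old
ID_AA64DFR0_EL1 field extraction into helpers:

	/* Assumed AArch64 shapes (arch/arm64/include/asm/arm_pmuv3.h): */
	static inline int read_pmuver(void)
	{
		u64 dfr0 = read_sysreg(id_aa64dfr0_el1);

		return cpuid_feature_extract_unsigned_field(dfr0,
				ID_AA64DFR0_EL1_PMUVer_SHIFT);
	}

	static inline bool pmuv3_implemented(int pmuver)
	{
		return !(pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
			 pmuver == ID_AA64DFR0_EL1_PMUVer_NI);
	}
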
@@ -1166,8 +1118,8 @@ static void __armv8pmu_probe_pmu(void *info)
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
- pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
- pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
+ pmceid[0] = pmceid_raw[0] = read_pmceid0();
+ pmceid[1] = pmceid_raw[1] = read_pmceid1();
bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
@@ -1178,9 +1130,9 @@ static void __armv8pmu_probe_pmu(void *info)
bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
- /* store PMMIR_EL1 register for sysfs */
- if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
- cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
+ /* store PMMIR register for sysfs */
+ if (is_pmuv3p4(pmuver) && (pmceid_raw[1] & BIT(31)))
+ cpu_pmu->reg_pmmir = read_pmmir();
else
cpu_pmu->reg_pmmir = 0;
}
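
Note: the PMMIR gate follows the same pattern; a sketch of the assumed
helper backing is_pmuv3p4():

	/* Assumed shape; PMMIR is only defined from PMUv3.4 onwards. */
	static inline bool is_pmuv3p4(int pmuver)
	{
		return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4;
	}
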