author	Mark Brown <broonie@kernel.org>	2022-09-10 17:33:51 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2022-09-16 12:38:57 +0100
commit	121a8fc088f13c64d9f3c9b3e7faa4c246e0a32c (patch)
tree	2214fa90c0cec2a37029832ba0e223f8f090abae
parent	arm64/sysreg: Add _EL1 into ID_AA64DFR0_EL1 definition names (diff)
arm64/sysreg: Use feature numbering for PMU and SPE revisions
Currently the kernel refers to the versions of the PMU and SPE features by the version of the architecture where those features were updated, but the Arm ARM refers to them using the FEAT_ names for the features. To improve consistency, to help with updating for newer features, and since v9 will make our current naming scheme a bit more confusing, update the macros identifying features to use the FEAT_ based scheme.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20220910163354.860255-4-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
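For orientation only (not part of the patch): the renamed constants follow the Arm ARM feature names, so PMUVer_IMP corresponds to FEAT_PMUv3, V3P1/V3P4/V3P5/V3P7 to FEAT_PMUv3p1/p4/p5/p7, PMSVer_IMP to FEAT_SPE, and PMSVer_V1P1 to FEAT_SPEv1p1. Below is a minimal sketch of how a version check reads with the new names, using the existing read_sysreg() and cpuid_feature_extract_unsigned_field() helpers; the function name has_pmuv3p4() is invented for this illustration.

#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Illustrative sketch only: does this CPU implement at least FEAT_PMUv3p4? */
static bool has_pmuv3p4(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
	unsigned int pmuver =
		cpuid_feature_extract_unsigned_field(dfr0,
						     ID_AA64DFR0_EL1_PMUVer_SHIFT);

	/* 0xf is IMPLEMENTATION DEFINED, not a newer architected PMU */
	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 &&
	       pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
}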
-rw-r--r--	arch/arm64/include/asm/sysreg.h	14
-rw-r--r--	arch/arm64/kernel/perf_event.c	4
-rw-r--r--	arch/arm64/kvm/pmu-emul.c	12
-rw-r--r--	arch/arm64/kvm/sys_regs.c	2
4 files changed, 16 insertions, 16 deletions
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index a9544561397d..aea3ec657c3f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -711,15 +711,15 @@
#define ID_AA64DFR0_EL1_TraceVer_SHIFT 4
#define ID_AA64DFR0_EL1_DebugVer_SHIFT 0
-#define ID_AA64DFR0_EL1_PMUVer_8_0 0x1
-#define ID_AA64DFR0_EL1_PMUVer_8_1 0x4
-#define ID_AA64DFR0_EL1_PMUVer_8_4 0x5
-#define ID_AA64DFR0_EL1_PMUVer_8_5 0x6
-#define ID_AA64DFR0_EL1_PMUVer_8_7 0x7
+#define ID_AA64DFR0_EL1_PMUVer_IMP 0x1
+#define ID_AA64DFR0_EL1_PMUVer_V3P1 0x4
+#define ID_AA64DFR0_EL1_PMUVer_V3P4 0x5
+#define ID_AA64DFR0_EL1_PMUVer_V3P5 0x6
+#define ID_AA64DFR0_EL1_PMUVer_V3P7 0x7
#define ID_AA64DFR0_EL1_PMUVer_IMP_DEF 0xf
-#define ID_AA64DFR0_EL1_PMSVer_8_2 0x1
-#define ID_AA64DFR0_EL1_PMSVer_8_3 0x2
+#define ID_AA64DFR0_EL1_PMSVer_IMP 0x1
+#define ID_AA64DFR0_EL1_PMSVer_V1P1 0x2
#define ID_DFR0_PERFMON_SHIFT 24
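For completeness, an analogous sketch for the SPE field (again not part of the patch): has_spev1p1() is an invented name, and the PMSVer shift constant assumed here is the one already defined in sysreg.h after the preceding rename patch.

/* Illustrative sketch only: does this CPU implement at least FEAT_SPEv1p1? */
static bool has_spev1p1(void)
{
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
	unsigned int pmsver =
		cpuid_feature_extract_unsigned_field(dfr0,
						     ID_AA64DFR0_EL1_PMSVer_SHIFT);

	return pmsver >= ID_AA64DFR0_EL1_PMSVer_V1P1;
}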
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 8b878837d8f1..7b0643fe2f13 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -390,7 +390,7 @@ static const struct attribute_group armv8_pmuv3_caps_attr_group = {
*/
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
- return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_5);
+ return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
}
static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
@@ -1172,7 +1172,7 @@ static void __armv8pmu_probe_pmu(void *info)
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* store PMMIR_EL1 register for sysfs */
- if (pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4 && (pmceid_raw[1] & BIT(31)))
+ if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
else
cpu_pmu->reg_pmmir = 0;
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 7122b5387de6..0003c7d37533 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -33,12 +33,12 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
pmuver = kvm->arch.arm_pmu->pmuver;
switch (pmuver) {
- case ID_AA64DFR0_EL1_PMUVer_8_0:
+ case ID_AA64DFR0_EL1_PMUVer_IMP:
return GENMASK(9, 0);
- case ID_AA64DFR0_EL1_PMUVer_8_1:
- case ID_AA64DFR0_EL1_PMUVer_8_4:
- case ID_AA64DFR0_EL1_PMUVer_8_5:
- case ID_AA64DFR0_EL1_PMUVer_8_7:
+ case ID_AA64DFR0_EL1_PMUVer_V3P1:
+ case ID_AA64DFR0_EL1_PMUVer_V3P4:
+ case ID_AA64DFR0_EL1_PMUVer_V3P5:
+ case ID_AA64DFR0_EL1_PMUVer_V3P7:
return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
@@ -856,7 +856,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
* Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
* as RAZ
*/
- if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_8_4)
+ if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
base = 32;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d4546a5b0ea5..2ef1121ab844 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1115,7 +1115,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
/* Limit guests to PMUv3 for ARMv8.4 */
val = cpuid_feature_cap_perfmon_field(val,
ID_AA64DFR0_EL1_PMUVer_SHIFT,
- kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_8_4 : 0);
+ kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
/* Hide SPE from guests */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
break;