From 53eaeb7fbe2702520125ae7d72742362c071a1f2 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:35 +0000 Subject: arm64: Add macro to compose a sysreg field value A common idiom is to compose a tuple (reg, field, val) into a symbol matching an autogenerated definition. Add a helper performing the concatenation and use it to replace the existing open-coded implementations. Suggested-by: Oliver Upton Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20240122181344.258974-2-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/sysreg.h | 5 ++++- arch/arm64/kvm/sys_regs.c | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index c3b19b376c86..9e8999592f3a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1181,6 +1181,8 @@ par; \ }) +#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val + #define SYS_FIELD_GET(reg, field, val) \ FIELD_GET(reg##_##field##_MASK, val) @@ -1188,7 +1190,8 @@ FIELD_PREP(reg##_##field##_MASK, val) #define SYS_FIELD_PREP_ENUM(reg, field, val) \ - FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val) + FIELD_PREP(reg##_##field##_MASK, \ + SYS_FIELD_VALUE(reg, field, val)) #endif diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 30253bd19917..88b8fbeafaa0 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1685,7 +1685,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \ (val) &= ~reg##_##field##_MASK; \ (val) |= FIELD_PREP(reg##_##field##_MASK, \ - min(__f_val, (u64)reg##_##field##_##limit)); \ + min(__f_val, \ + (u64)SYS_FIELD_VALUE(reg, field, limit))); \ (val); \ }) -- cgit v1.2.3-59-g8ed1b From d9a065914dccce0bbd967cdef1ec21b4de545daf Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:36 +0000 Subject: arm64: cpufeatures: Correctly handle signed values Although we've had signed values for some features such as PMUv3 and FP, the code that handles the comparison with some limit has a couple of annoying issues: - the min_field_value is always unsigned, meaning that we cannot easily compare it with a negative value - it is not possible to have a range of values, let alone a range of negative values Fix this by: - adding an upper limit to the comparison, defaulting to all bits being set to the maximum positive value - ensuring that the signedness of the min and max values is taken into account An ARM64_CPUID_FIELDS_NEG() macro is provided for signed features, but nothing is using it yet.
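For reference, the [min, max] comparison that the feature_matches() rework in this patch switches to can be restated as the following standalone sketch (illustrative only, not the kernel code; extract_field() and field_in_range() are made-up names):

#include <stdint.h>
#include <stdbool.h>

/* Extract reg[shift+width-1:shift], sign-extending negative encodings. */
static int extract_field(uint64_t reg, int shift, int width, bool sign)
{
	uint64_t val = (reg >> shift) & ((1ULL << width) - 1);

	if (sign && (val & (1ULL << (width - 1))))
		val |= ~0ULL << width;

	return (int)val;
}

/* A capability matches when the field value lies within [min, max]. */
static bool field_in_range(uint64_t reg, int shift, int width, bool sign,
			   int min, int max)
{
	int val = extract_field(reg, shift, width, sign);

	return val >= min && val <= max;
}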
Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20240122181344.258974-3-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/cpufeature.h | 1 + arch/arm64/kernel/cpufeature.c | 65 ++++++++++++++++++++++++++++++++----- 2 files changed, 57 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 21c824edf8ce..a98d95f3492b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -363,6 +363,7 @@ struct arm64_cpu_capabilities { u8 field_pos; u8 field_width; u8 min_field_value; + u8 max_field_value; u8 hwcap_type; bool sign; unsigned long hwcap; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 8d1a634a403e..92b1546f2622 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -140,12 +140,42 @@ void dump_cpu_features(void) pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps); } +#define __ARM64_MAX_POSITIVE(reg, field) \ + ((reg##_##field##_SIGNED ? \ + BIT(reg##_##field##_WIDTH - 1) : \ + BIT(reg##_##field##_WIDTH)) - 1) + +#define __ARM64_MIN_NEGATIVE(reg, field) BIT(reg##_##field##_WIDTH - 1) + +#define __ARM64_CPUID_FIELDS(reg, field, min_value, max_value) \ + .sys_reg = SYS_##reg, \ + .field_pos = reg##_##field##_SHIFT, \ + .field_width = reg##_##field##_WIDTH, \ + .sign = reg##_##field##_SIGNED, \ + .min_field_value = min_value, \ + .max_field_value = max_value, + +/* + * ARM64_CPUID_FIELDS() encodes a field with a range from min_value to + * an implicit maximum that depends on the sign-ess of the field. + * + * An unsigned field will be capped at all ones, while a signed field + * will be limited to the positive half only. + */ #define ARM64_CPUID_FIELDS(reg, field, min_value) \ - .sys_reg = SYS_##reg, \ - .field_pos = reg##_##field##_SHIFT, \ - .field_width = reg##_##field##_WIDTH, \ - .sign = reg##_##field##_SIGNED, \ - .min_field_value = reg##_##field##_##min_value, + __ARM64_CPUID_FIELDS(reg, field, \ + SYS_FIELD_VALUE(reg, field, min_value), \ + __ARM64_MAX_POSITIVE(reg, field)) + +/* + * ARM64_CPUID_FIELDS_NEG() encodes a field with a range from an + * implicit minimal value to max_value. This should be used when + * matching a non-implemented property. 
+ */ +#define ARM64_CPUID_FIELDS_NEG(reg, field, max_value) \ + __ARM64_CPUID_FIELDS(reg, field, \ + __ARM64_MIN_NEGATIVE(reg, field), \ + SYS_FIELD_VALUE(reg, field, max_value)) #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \ { \ @@ -1451,11 +1481,28 @@ has_always(const struct arm64_cpu_capabilities *entry, int scope) static bool feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry) { - int val = cpuid_feature_extract_field_width(reg, entry->field_pos, - entry->field_width, - entry->sign); + int val, min, max; + u64 tmp; + + val = cpuid_feature_extract_field_width(reg, entry->field_pos, + entry->field_width, + entry->sign); + + tmp = entry->min_field_value; + tmp <<= entry->field_pos; + + min = cpuid_feature_extract_field_width(tmp, entry->field_pos, + entry->field_width, + entry->sign); + + tmp = entry->max_field_value; + tmp <<= entry->field_pos; + + max = cpuid_feature_extract_field_width(tmp, entry->field_pos, + entry->field_width, + entry->sign); - return val >= entry->min_field_value; + return val >= min && val <= max; } static u64 -- cgit v1.2.3-59-g8ed1b From d42bf63fd4db20e1a7a414c2ebe257a81c8c7d6e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:37 +0000 Subject: arm64: cpufeature: Correctly display signed override values When a field gets overridden, the kernel indicates the result of the override in dmesg. This works well with unsigned fields, but results in a pretty ugly output when the field is signed. Truncate the field to its width before displaying it. Reviewed-by: Oliver Upton Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20240122181344.258974-4-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kernel/cpufeature.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 92b1546f2622..bae5d0655262 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -949,7 +949,8 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new) pr_warn("%s[%d:%d]: %s to %llx\n", reg->name, ftrp->shift + ftrp->width - 1, - ftrp->shift, str, tmp); + ftrp->shift, str, + tmp & (BIT(ftrp->width) - 1)); } else if ((ftr_mask & reg->override->val) == ftr_mask) { reg->override->val &= ~ftr_mask; pr_warn("%s[%d:%d]: impossible override, ignored\n", -- cgit v1.2.3-59-g8ed1b From cfc680bb04c54e61faa51a34d8383a0aa25b583f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:38 +0000 Subject: arm64: sysreg: Add layout for ID_AA64MMFR4_EL1 ARMv9.5 has introduced ID_AA64MMFR4_EL1 with a bunch of new features. Add the corresponding layout. This is extracted from the public ARM SysReg_xml_A_profile-2023-09 delivery, timestamped d55f5af8e09052abe92a02adf820deea2eaed717.
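For orientation, each field block in the sysreg description added below is turned by the generator into shift/width/mask/signedness constants plus one define per enumerated value. For E2H0 the generated constants are roughly of the following shape (the numeric values come from the layout itself; the exact spelling of the MASK/SIGNED/IMP definitions is an assumption about the generator's output):

/* Sketch of the generated constants for ID_AA64MMFR4_EL1.E2H0 (bits [27:24], signed). */
#define ID_AA64MMFR4_EL1_E2H0_SHIFT	24
#define ID_AA64MMFR4_EL1_E2H0_WIDTH	4
#define ID_AA64MMFR4_EL1_E2H0_MASK	GENMASK(27, 24)
#define ID_AA64MMFR4_EL1_E2H0_SIGNED	true
#define ID_AA64MMFR4_EL1_E2H0_IMP	UL(0b0000)
#define ID_AA64MMFR4_EL1_E2H0_NI_NV1	UL(0b1110)
#define ID_AA64MMFR4_EL1_E2H0_NI	UL(0b1111)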
Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Reviewed-by: Miguel Luis Link: https://lore.kernel.org/r/20240122181344.258974-5-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/tools/sysreg | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 4c9b67934367..fa3fe0856880 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1791,6 +1791,43 @@ UnsignedEnum 3:0 TCRX EndEnum EndSysreg +Sysreg ID_AA64MMFR4_EL1 3 0 0 7 4 +Res0 63:40 +UnsignedEnum 39:36 E3DSE + 0b0000 NI + 0b0001 IMP +EndEnum +Res0 35:28 +SignedEnum 27:24 E2H0 + 0b0000 IMP + 0b1110 NI_NV1 + 0b1111 NI +EndEnum +UnsignedEnum 23:20 NV_frac + 0b0000 NV_NV2 + 0b0001 NV2_ONLY +EndEnum +UnsignedEnum 19:16 FGWTE3 + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 15:12 HACDBS + 0b0000 NI + 0b0001 IMP +EndEnum +UnsignedEnum 11:8 ASID2 + 0b0000 NI + 0b0001 IMP +EndEnum +SignedEnum 7:4 EIESB + 0b0000 NI + 0b0001 ToEL3 + 0b0010 ToELx + 0b1111 ANY +EndEnum +Res0 3:0 +EndSysreg + Sysreg SCTLR_EL1 3 0 1 0 0 Field 63 TIDCP Field 62 SPINTMASK -- cgit v1.2.3-59-g8ed1b From 805bb61f827997c59b92669ae8cc3740ebcf1087 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:39 +0000 Subject: arm64: cpufeature: Add ID_AA64MMFR4_EL1 handling Add ID_AA64MMFR4_EL1 to the list of idregs the kernel knows about, and describe the E2H0 field. Reviewed-by: Oliver Upton Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20240122181344.258974-6-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/cpu.h | 1 + arch/arm64/kernel/cpufeature.c | 7 +++++++ arch/arm64/kernel/cpuinfo.c | 1 + 3 files changed, 9 insertions(+) diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index b1e43f56ee46..6c13fd47e170 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -56,6 +56,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64mmfr1; u64 reg_id_aa64mmfr2; u64 reg_id_aa64mmfr3; + u64 reg_id_aa64mmfr4; u64 reg_id_aa64pfr0; u64 reg_id_aa64pfr1; u64 reg_id_aa64zfr0; diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index bae5d0655262..ad3753fbdcb1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -437,6 +437,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = { + S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_ctr[] = { ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1), @@ -754,6 +759,7 @@ static const struct __ftr_reg_entry { &id_aa64mmfr1_override), ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2), ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3), + ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4), /* Op1 = 1, CRn = 0, CRm = 0 */ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), @@ -1078,6 +1084,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3); + init_cpu_ftr_reg(SYS_ID_AA64MMFR4_EL1, info->reg_id_aa64mmfr4); 
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0); init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1); init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0); diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 47043c0d95ec..7ca3fbd200f0 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -447,6 +447,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); info->reg_id_aa64mmfr3 = read_cpuid(ID_AA64MMFR3_EL1); + info->reg_id_aa64mmfr4 = read_cpuid(ID_AA64MMFR4_EL1); info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1); -- cgit v1.2.3-59-g8ed1b From da9af5071b25cb81682c3506c96516fe55186577 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:40 +0000 Subject: arm64: cpufeature: Detect HCR_EL2.NV1 being RES0 A variant of FEAT_E2H0 not being implemented exists in the form of HCR_EL2.E2H being RES1 *and* HCR_EL2.NV1 being RES0 (indicating that only VHE is supported on the host and nested guests). Add the necessary infrastructure for this new CPU capability. Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20240122181344.258974-7-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kernel/cpufeature.c | 12 ++++++++++++ arch/arm64/tools/cpucaps | 1 + 2 files changed, 13 insertions(+) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ad3753fbdcb1..91249d20883b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1794,6 +1794,11 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, return !meltdown_safe; } +static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope) +{ + return !has_cpuid_feature(entry, scope); +} + #if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2) static bool has_lpa2_at_stage1(u64 mmfr0) { @@ -2794,6 +2799,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .type = ARM64_CPUCAP_SYSTEM_FEATURE, .matches = has_lpa2, }, + { + .desc = "NV1", + .capability = ARM64_HAS_HCR_NV1, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_nv1, + ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1) + }, {}, }; diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index b912b1409fc0..65090dd34641 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -35,6 +35,7 @@ HAS_GENERIC_AUTH_IMP_DEF HAS_GIC_CPUIF_SYSREGS HAS_GIC_PRIO_MASKING HAS_GIC_PRIO_RELAXED_SYNC +HAS_HCR_NV1 HAS_HCX HAS_LDAPR HAS_LPA2 -- cgit v1.2.3-59-g8ed1b From 3944382fa6f22b54fd399632b1af92c28123979b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:41 +0000 Subject: arm64: Treat HCR_EL2.E2H as RES1 when ID_AA64MMFR4_EL1.E2H0 is negative For CPUs that have ID_AA64MMFR4_EL1.E2H0 as negative, it is important to avoid the boot path that sets HCR_EL2.E2H=0. Fortunately, we already have this path to cope with fruity CPUs. Tweak init_el2 to look at ID_AA64MMFR4_EL1.E2H0 first. 
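In C terms, the new check that the head.S hunk below adds to init_el2 boils down to testing the sign bit of the E2H0 field. A standalone sketch (illustrative only, not the kernel code):

#include <stdint.h>
#include <stdbool.h>

/*
 * ID_AA64MMFR4_EL1.E2H0 occupies bits [27:24] and is signed:
 * 0b0000 (IMP) means FEAT_E2H0 is implemented, while the negative
 * encodings 0b1110 (NI_NV1, i.e. -2) and 0b1111 (NI, i.e. -1) mean
 * the CPU is VHE-only and HCR_EL2.E2H behaves as RES1.
 */
static bool vhe_only(uint64_t mmfr4)
{
	uint64_t e2h0 = (mmfr4 >> 24) & 0xf;

	return e2h0 & 0x8;	/* sign bit set => negative => VHE-only */
}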
Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20240122181344.258974-8-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kernel/head.S | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index cab7f91949d8..5bdafbcff009 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -584,25 +584,32 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) mov_q x1, INIT_SCTLR_EL1_MMU_OFF /* - * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, - * making it impossible to start in nVHE mode. Is that - * compliant with the architecture? Absolutely not! + * Compliant CPUs advertise their VHE-onlyness with + * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be + * RES1 in that case. + * + * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but + * don't advertise it (they predate this relaxation). */ + mrs_s x0, SYS_ID_AA64MMFR4_EL1 + ubfx x0, x0, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH + tbnz x0, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f + mrs x0, hcr_el2 and x0, x0, #HCR_E2H - cbz x0, 1f - + cbz x0, 2f +1: /* Set a sane SCTLR_EL1, the VHE way */ pre_disable_mmu_workaround msr_s SYS_SCTLR_EL12, x1 mov x2, #BOOT_CPU_FLAG_E2H - b 2f + b 3f -1: +2: pre_disable_mmu_workaround msr sctlr_el1, x1 mov x2, xzr -2: +3: __init_el2_nvhe_prepare_eret mov w0, #BOOT_CPU_MODE_EL2 -- cgit v1.2.3-59-g8ed1b From c21df6e43f0ed51903738ee34cdb2ec2f978024d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:42 +0000 Subject: KVM: arm64: Expose ID_AA64MMFR4_EL1 to guests We can now expose ID_AA64MMFR4_EL1 to guests, and let NV guests understand that they cannot really switch HCR_EL2.E2H to 0 on some platforms. Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20240122181344.258974-9-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/nested.c | 7 +++++++ arch/arm64/kvm/sys_regs.c | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index ba95d044bc98..d55e809e26cb 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -133,6 +133,13 @@ static u64 limit_nv_id_reg(u32 id, u64 val) val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001); break; + case SYS_ID_AA64MMFR4_EL1: + val = 0; + if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1)) + val |= FIELD_PREP(NV_FTR(MMFR4, E2H0), + ID_AA64MMFR4_EL1_E2H0_NI_NV1); + break; + case SYS_ID_AA64DFR0_EL1: /* Only limited support for PMU, Debug, BPs and WPs */ val &= (NV_FTR(DFR0, PMUVer) | diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 88b8fbeafaa0..08a9571fa809 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2350,7 +2350,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64MMFR2_EL1_NV | ID_AA64MMFR2_EL1_CCIDX)), ID_SANITISED(ID_AA64MMFR3_EL1), - ID_UNALLOCATED(7,4), + ID_SANITISED(ID_AA64MMFR4_EL1), ID_UNALLOCATED(7,5), ID_UNALLOCATED(7,6), ID_UNALLOCATED(7,7), -- cgit v1.2.3-59-g8ed1b From 94f29ab2d8018c035098b58d89fc4ae36894a706 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:43 +0000 Subject: KVM: arm64: Force guest's HCR_EL2.E2H RES1 when NV1 is not implemented If NV1 isn't supported on a system, make sure we always evaluate the guest's HCR_EL2.E2H as RES1, irrespective of what the guest may have written there. 
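The kvm_emulate.h change below can be restated as the following standalone sketch (illustrative only; HCR_EL2.E2H is bit 34): when the host lacks FEAT_NV1, the guest's E2H is seen as set regardless of what was written.

#include <stdint.h>
#include <stdbool.h>

#define HCR_E2H		(1ULL << 34)	/* HCR_EL2.E2H */

static bool guest_e2h_is_set(bool host_has_nv1, uint64_t guest_hcr_el2)
{
	if (!host_has_nv1)
		return true;	/* E2H is effectively RES1 */

	return guest_hcr_el2 & HCR_E2H;
}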
Reviewed-by: Suzuki K Poulose Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20240122181344.258974-10-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_emulate.h | 3 ++- arch/arm64/kvm/sys_regs.c | 12 +++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index b804fe832184..debc3753d2ef 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -209,7 +209,8 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu) static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt) { - return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H; + return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) || + (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H)); } static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 08a9571fa809..041b11825578 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2175,6 +2175,16 @@ static bool access_spsr(struct kvm_vcpu *vcpu, return true; } +static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) +{ + u64 val = r->val; + + if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1)) + val |= HCR_E2H; + + return __vcpu_sys_reg(vcpu, r->reg) = val; +} + /* * Architected system registers. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 @@ -2666,7 +2676,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0), EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1), EL2_REG(ACTLR_EL2, access_rw, reset_val, 0), - EL2_REG_VNCR(HCR_EL2, reset_val, 0), + EL2_REG_VNCR(HCR_EL2, reset_hcr, 0), EL2_REG(MDCR_EL2, access_rw, reset_val, 0), EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), EL2_REG_VNCR(HSTR_EL2, reset_val, 0), -- cgit v1.2.3-59-g8ed1b From aade38faca631f25ec2842da4094d74bb2790bd5 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 22 Jan 2024 18:13:44 +0000 Subject: KVM: arm64: Handle Apple M2 as not having HCR_EL2.NV1 implemented Although the Apple M2 family of CPUs can have HCR_EL2.NV1 being set and clear, with the change in trap behaviour being OK, they explode spectacularly on an EL2 S1 page table using the nVHE format. This is no good. Let's pretend this HW doesn't have NV1, and move along. Signed-off-by: Marc Zyngier Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20240122181344.258974-11-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kernel/cpufeature.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 91249d20883b..0f29ac43c7a2 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1796,7 +1796,23 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope) { - return !has_cpuid_feature(entry, scope); + /* + * Although the Apple M2 family appears to support NV1, the + * PTW barfs on the nVHE EL2 S1 page table format. Pretend + * that it doesn't support NV1 at all.
+ */ + static const struct midr_range nv1_ni_list[] = { + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX), + MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX), + {} + }; + + return !(has_cpuid_feature(entry, scope) || + is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)); } #if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2) -- cgit v1.2.3-59-g8ed1b From 87b8cf2387c5ee79576988b2e72b84eeb92c57ec Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 12 Feb 2024 14:47:35 +0000 Subject: arm64: cpufeatures: Add missing ID_AA64MMFR4_EL1 to __read_sysreg_by_encoding() When triggering a CPU hotplug scenario, we reparse the CPU feature with SCOPE_LOCAL_CPU, for which we use __read_sysreg_by_encoding() to get the HW value for this CPU. As it turns out, we're missing the handling for ID_AA64MMFR4_EL1, and trigger a BUG(). Funnily enough, Marek isn't completely happy about that. Add the damn register to the list. Fixes: 805bb61f8279 ("arm64: cpufeature: Add ID_AA64MMFR4_EL1 handling") Reported-by: Marek Szyprowski Tested-by: Marek Szyprowski Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20240212144736.1933112-2-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kernel/cpufeature.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0f29ac43c7a2..2f8958f27e9e 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1456,6 +1456,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64MMFR1_EL1); read_sysreg_case(SYS_ID_AA64MMFR2_EL1); read_sysreg_case(SYS_ID_AA64MMFR3_EL1); + read_sysreg_case(SYS_ID_AA64MMFR4_EL1); read_sysreg_case(SYS_ID_AA64ISAR0_EL1); read_sysreg_case(SYS_ID_AA64ISAR1_EL1); read_sysreg_case(SYS_ID_AA64ISAR2_EL1); -- cgit v1.2.3-59-g8ed1b From 3673d01a2f555603cbf756874c7388b76bfbc967 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 12 Feb 2024 14:47:36 +0000 Subject: arm64: cpufeatures: Only check for NV1 if NV is present We handle ID_AA64MMFR4_EL1.E2H0 being 0 as NV1 being present. However, this is only true if FEAT_NV is implemented. Add the required check to has_nv1(), avoiding spuriously advertising NV1 on HW that doesn't have NV at all. 
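The resulting predicate in has_nv1() reads more naturally without the double negation. An illustrative restatement of what the hunk below implements (not the kernel function itself):

#include <stdbool.h>

/* NV1 is advertised only when nested virt exists at all, E2H0 does not
 * flag NV1 as not-implemented, and the CPU is not on the Apple M2
 * deny-list.
 */
static bool nv1_supported(bool has_nested_virt, bool e2h0_says_no_nv1,
			  bool on_m2_denylist)
{
	return has_nested_virt && !e2h0_says_no_nv1 && !on_m2_denylist;
}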
Fixes: da9af5071b25 ("arm64: cpufeature: Detect HCR_EL2.NV1 being RES0") Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20240212144736.1933112-3-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kernel/cpufeature.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2f8958f27e9e..3421b684d340 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1812,8 +1812,9 @@ static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope) {} }; - return !(has_cpuid_feature(entry, scope) || - is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)); + return (this_cpu_has_cap(ARM64_HAS_NESTED_VIRT) && + !(has_cpuid_feature(entry, scope) || + is_midr_in_range_list(read_cpuid_id(), nv1_ni_list))); } #if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2) -- cgit v1.2.3-59-g8ed1b From 9aa030cee1c45d6e962f6bf22ba63d4aff2b1644 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 15 Feb 2024 01:49:54 +0000 Subject: arm64: cpufeatures: Fix FEAT_NV check when checking for FEAT_NV1 Using this_cpu_has_cap() has the potential to go wrong when used system-wide on a preemptible kernel. Instead, use the __system_matches_cap() helper when checking for FEAT_NV in the FEAT_NV1 probing helper. Fixes: 3673d01a2f55 ("arm64: cpufeatures: Only check for NV1 if NV is present") Reported-by: Marek Szyprowski Signed-off-by: Marc Zyngier Reviewed-by: Suzuki K Poulose Tested-by: Marek Szyprowski Link: https://lore.kernel.org/kvmarm/86bk8k5ts3.wl-maz@kernel.org/ Signed-off-by: Oliver Upton --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 3421b684d340..f309fd542c20 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1812,7 +1812,7 @@ static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope) {} }; - return (this_cpu_has_cap(ARM64_HAS_NESTED_VIRT) && + return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) && !(has_cpuid_feature(entry, scope) || is_midr_in_range_list(read_cpuid_id(), nv1_ni_list))); } -- cgit v1.2.3-59-g8ed1b
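Finally, a minimal usage sketch, assuming a kernel with this series applied: once the capabilities have been finalised, consumers simply test the new capability, as the KVM changes above already do.

#include <asm/cpufeature.h>

static bool hcr_nv1_available(void)
{
	/* true when FEAT_NV1 is implemented and usable system-wide */
	return cpus_have_final_cap(ARM64_HAS_HCR_NV1);
}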