From 89f5074c503b6b6f181c0240c931f67bcaf266e9 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 19 Apr 2022 19:27:50 +0100
Subject: KVM: arm64: Handle blocking WFIT instruction

When trapping a blocking WFIT instruction, take it into account when
computing the deadline of the background timer.

The state is tracked with a new vcpu flag, and is gated by a new CPU
capability, which isn't currently enabled.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220419182755.601427-6-maz@kernel.org
---
 arch/arm64/kvm/handle_exit.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/kvm/handle_exit.c')

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 97fe14aab1a3..4260f2cd1971 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -85,16 +85,21 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
  * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
+ * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 {
-        if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+        u64 esr = kvm_vcpu_get_esr(vcpu);
+
+        if (esr & ESR_ELx_WFx_ISS_WFE) {
                 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                 vcpu->stat.wfe_exit_stat++;
                 kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
         } else {
                 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
                 vcpu->stat.wfi_exit_stat++;
+                if ((esr & (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT)) == (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT))
+                        vcpu->arch.flags |= KVM_ARM64_WFIT;
+
                 kvm_vcpu_wfi(vcpu);
         }
--
cgit v1.2.3-59-g8ed1b
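The deadline WFIT carries is an absolute virtual-counter value, so the
background timer mentioned above has to turn it into a relative nanosecond
delay before arming. A minimal standalone sketch of that conversion follows;
the names and the caller-supplied counter frequency are hypothetical, not the
kernel's actual timer helper:

#include <stdint.h>

/* Scale counter ticks to nanoseconds; split the division to avoid overflow. */
static uint64_t ticks_to_ns(uint64_t ticks, uint64_t freq_hz)
{
        return (ticks / freq_hz) * 1000000000ULL +
               ((ticks % freq_hz) * 1000000000ULL) / freq_hz;
}

/* Nanoseconds until an absolute counter deadline; 0 if it already passed. */
static uint64_t wfit_delay_ns(uint64_t now, uint64_t deadline, uint64_t freq_hz)
{
        if (now >= deadline)
                return 0;
        return ticks_to_ns(deadline - now, freq_hz);
}

A zero delay here corresponds exactly to the already-expired case that the
next patch short-circuits.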
From a3fb59651449d8bd4dc4ed5413888819932c740b Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 19 Apr 2022 19:27:51 +0100
Subject: KVM: arm64: Offer early resume for non-blocking WFxT instructions

For WFxT instructions used with very small delays, it is not unlikely
that the deadline is already expired by the time we reach the WFx
handling code.

Check for this condition as soon as possible, and return to the guest
immediately if we can.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220419182755.601427-7-maz@kernel.org
---
 arch/arm64/kvm/handle_exit.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

(limited to 'arch/arm64/kvm/handle_exit.c')

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 4260f2cd1971..7726b01dc09a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -80,12 +80,14 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
  *
  * @vcpu: the vcpu pointer
  *
- * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
  * decides to.
  * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
+ *
+ * WF{I,E}T can immediately return if the deadline has already expired.
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 {
@@ -94,15 +96,35 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
         if (esr & ESR_ELx_WFx_ISS_WFE) {
                 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                 vcpu->stat.wfe_exit_stat++;
-                kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
         } else {
                 trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
                 vcpu->stat.wfi_exit_stat++;
-                if ((esr & (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT)) == (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT))
+        }
+
+        if (esr & ESR_ELx_WFx_ISS_WFxT) {
+                if (esr & ESR_ELx_WFx_ISS_RV) {
+                        u64 val, now;
+
+                        now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
+                        val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
+
+                        if (now >= val)
+                                goto out;
+                } else {
+                        /* Treat WFxT as WFx if RN is invalid */
+                        esr &= ~ESR_ELx_WFx_ISS_WFxT;
+                }
+        }
+
+        if (esr & ESR_ELx_WFx_ISS_WFE) {
+                kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
+        } else {
+                if (esr & ESR_ELx_WFx_ISS_WFxT)
                         vcpu->arch.flags |= KVM_ARM64_WFIT;
+
                 kvm_vcpu_wfi(vcpu);
         }
-
+out:
         kvm_incr_pc(vcpu);

         return 1;
--
cgit v1.2.3-59-g8ed1b
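To see why tiny deadlines matter, consider a guest issuing WFIT with a timeout
only a few counter ticks away: by the time the trap reaches the handler above,
now >= val can already hold and the vcpu resumes without ever blocking. A
hypothetical bare-metal guest snippet (assumes FEAT_WFxT and an assembler that
accepts armv8.7-a; not taken from any real guest kernel):

static inline unsigned long read_cntvct(void)
{
        unsigned long cnt;

        /* Current value of the virtual counter. */
        asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
        return cnt;
}

static inline void wfit(unsigned long deadline)
{
        /* Wait for an interrupt, or until CNTVCT_EL0 reaches the deadline. */
        asm volatile(".arch armv8.7-a\n\twfit %0" : : "r" (deadline) : "memory");
}

static void tiny_timed_wait(void)
{
        /* A deadline ~16 ticks out is easily stale once the trap is handled. */
        wfit(read_cntvct() + 16);
}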
From 6ccf9cb557bd32073b0d68baed97f1bd8a40ff1d Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Wed, 20 Apr 2022 14:42:57 -0700
Subject: KVM: arm64: Symbolize the nVHE HYP addresses

Reintroduce the __kvm_nvhe_ symbols in kallsyms, ignoring the local
symbols in this namespace. The local symbols are not informative and
can cause aliasing issues when symbolizing the addresses.

With the necessary symbols now in kallsyms we can symbolize nVHE
addresses using the %p print format specifier:

  [   98.916444][  T426] kvm [426]: nVHE hyp panic at: [] __kvm_nvhe_overflow_stack+0x8/0x34!

Signed-off-by: Kalesh Singh
Tested-by: Fuad Tabba
Reviewed-by: Fuad Tabba
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220420214317.3303360-7-kaleshsingh@google.com
---
 arch/arm64/kvm/handle_exit.c | 13 +++++--------
 scripts/kallsyms.c           |  3 ++-
 2 files changed, 7 insertions(+), 9 deletions(-)

(limited to 'arch/arm64/kvm/handle_exit.c')

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 97fe14aab1a3..a377b871bf58 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -295,13 +295,8 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
         u64 elr_in_kimg = __phys_to_kimg(elr_phys);
         u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
         u64 mode = spsr & PSR_MODE_MASK;
+        u64 panic_addr = elr_virt + hyp_offset;

-        /*
-         * The nVHE hyp symbols are not included by kallsyms to avoid issues
-         * with aliasing. That means that the symbols cannot be printed with the
-         * "%pS" format specifier, so fall back to the vmlinux address if
-         * there's no better option.
-         */
         if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
                 kvm_err("Invalid host exception to nVHE hyp!\n");
         } else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
@@ -321,9 +316,11 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
                 if (file)
                         kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
                 else
-                        kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
+                        kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
+                                (void *)panic_addr);
         } else {
-                kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
+                kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
+                        (void *)panic_addr);
         }

         /*
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 8caabddf817c..e6906f79833d 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -111,7 +111,8 @@ static bool is_ignored_symbol(const char *name, char type)
                 ".L",                   /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
                 "__crc_",               /* modversions */
                 "__efistub_",           /* arm64 EFI stub namespace */
-                "__kvm_nvhe_",          /* arm64 non-VHE KVM namespace */
+                "__kvm_nvhe_$",         /* arm64 local symbols in non-VHE KVM namespace */
+                "__kvm_nvhe_.L",        /* arm64 local symbols in non-VHE KVM namespace */
                 "__AArch64ADRPThunk_",  /* arm64 lld */
                 "__ARMV5PILongThunk_",  /* arm lld */
                 "__ARMV7PILongThunk_",
--
cgit v1.2.3-59-g8ed1b
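The scripts/kallsyms.c half of this change is plain prefix filtering:
compiler-generated locals such as __kvm_nvhe_$x.0 or __kvm_nvhe_.Ltmp1 stay
hidden, while real hyp symbols like __kvm_nvhe_overflow_stack now reach
kallsyms. A standalone sketch of the idea (not the scripts/kallsyms.c code
itself; names are illustrative):

#include <stdbool.h>
#include <string.h>

static bool is_ignored_nvhe_symbol(const char *name)
{
        static const char * const prefixes[] = {
                "__kvm_nvhe_$",         /* local symbols in the nVHE namespace */
                "__kvm_nvhe_.L",        /* local labels in the nVHE namespace */
        };
        size_t i;

        for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++)
                if (!strncmp(name, prefixes[i], strlen(prefixes[i])))
                        return true;

        return false;   /* e.g. "__kvm_nvhe_overflow_stack" is kept */
}

With the locals filtered out, %pB on a translated hyp address resolves to a
single unambiguous __kvm_nvhe_ function, which is what makes the panic output
above readable.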
From 9369bc5c5e35985f38d04bd98c6d28a032e84b17 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Tue, 3 May 2022 06:02:02 +0000
Subject: KVM: arm64: Plumb cp10 ID traps through the AArch64 sysreg handler

In order to enable HCR_EL2.TID3 for AArch32 guests KVM needs to handle
traps where ESR_EL2.EC=0x8, which corresponds to an attempted VMRS
access from an ID group register. Specifically, the MVFR{0-2} registers
are accessed this way from AArch32. Conveniently, these registers are
architecturally mapped to MVFR{0-2}_EL1 in AArch64. Furthermore, KVM
already handles reads to these aliases in AArch64.

Plumb VMRS read traps through to the general AArch64 system register
handler.

Signed-off-by: Oliver Upton
Reviewed-by: Reiji Watanabe
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20220503060205.2823727-5-oupton@google.com
---
 arch/arm64/include/asm/kvm_host.h |  1 +
 arch/arm64/kvm/handle_exit.c      |  1 +
 arch/arm64/kvm/sys_regs.c         | 71 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 73 insertions(+)

(limited to 'arch/arm64/kvm/handle_exit.c')

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 94a27a7520f4..05081b9b7369 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -683,6 +683,7 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 97fe14aab1a3..5088a86ace5b 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -167,6 +167,7 @@ static exit_handle_fn arm_exit_handlers[] = {
         [ESR_ELx_EC_CP15_64]    = kvm_handle_cp15_64,
         [ESR_ELx_EC_CP14_MR]    = kvm_handle_cp14_32,
         [ESR_ELx_EC_CP14_LS]    = kvm_handle_cp14_load_store,
+        [ESR_ELx_EC_CP10_ID]    = kvm_handle_cp10_id,
         [ESR_ELx_EC_CP14_64]    = kvm_handle_cp14_64,
         [ESR_ELx_EC_HVC32]      = handle_hvc,
         [ESR_ELx_EC_SMC32]      = handle_smc,
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f403ea47b8a3..586b292ca94f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2346,6 +2346,77 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
                             struct sys_reg_params *params);

+/*
+ * The CP10 ID registers are architecturally mapped to AArch64 feature
+ * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
+ * from AArch32.
+ */
+static bool kvm_esr_cp10_id_to_sys64(u32 esr, struct sys_reg_params *params)
+{
+        u8 reg_id = (esr >> 10) & 0xf;
+        bool valid;
+
+        params->is_write = ((esr & 1) == 0);
+        params->Op0 = 3;
+        params->Op1 = 0;
+        params->CRn = 0;
+        params->CRm = 3;
+
+        /* CP10 ID registers are read-only */
+        valid = !params->is_write;
+
+        switch (reg_id) {
+        /* MVFR0 */
+        case 0b0111:
+                params->Op2 = 0;
+                break;
+        /* MVFR1 */
+        case 0b0110:
+                params->Op2 = 1;
+                break;
+        /* MVFR2 */
+        case 0b0101:
+                params->Op2 = 2;
+                break;
+        default:
+                valid = false;
+        }
+
+        if (valid)
+                return true;
+
+        kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
+                      params->is_write ? "write" : "read", reg_id);
+        return false;
+}
+
+/**
+ * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
+ * VFP Register' from AArch32.
+ * @vcpu: The vCPU pointer
+ *
+ * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
+ * Work out the correct AArch64 system register encoding and reroute to the
+ * AArch64 system register emulation.
+ */
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
+{
+        int Rt = kvm_vcpu_sys_get_rt(vcpu);
+        u32 esr = kvm_vcpu_get_esr(vcpu);
+        struct sys_reg_params params;
+
+        /* UNDEF on any unhandled register access */
+        if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
+                kvm_inject_undefined(vcpu);
+                return 1;
+        }
+
+        if (emulate_sys_reg(vcpu, &params))
+                vcpu_set_reg(vcpu, Rt, params.regval);
+
+        return 1;
+}
+
 /**
  * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
  * CRn=0, which corresponds to the AArch32 feature
--
cgit v1.2.3-59-g8ed1b
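The ISS decode performed by kvm_esr_cp10_id_to_sys64() can be exercised in
isolation. A small standalone sketch using the field layout from the patch
(reg_id in ESR bits [13:10]; direction in bit 0, set for reads):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A trapped "VMRS <Rt>, MVFR0": reg_id 0b0111, direction bit set. */
        uint32_t esr = (0x7u << 10) | 1;
        uint8_t reg_id = (esr >> 10) & 0xf;
        int is_write = ((esr & 1) == 0);

        /* Per the switch above, this maps to MVFR0_EL1: Op0=3, Op1=0, CRn=0, CRm=3, Op2=0. */
        printf("reg_id=%u is_write=%d\n", reg_id, is_write);
        return 0;
}

Since the CP10 ID registers are read-only, any encoding with the direction bit
clear fails the valid check and the guest takes an UNDEF instead.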