Diffstat (limited to 'arch/arm64/kvm/handle_exit.c')
 arch/arm64/kvm/handle_exit.c | 174 ++++++++++++++++++++++++++++++++++++-----
 1 file changed, 159 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 617ae6dea5d5..453266c96481 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -10,6 +10,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/ubsan.h>
#include <asm/esr.h>
#include <asm/exception.h>
@@ -56,6 +57,13 @@ static int handle_hvc(struct kvm_vcpu *vcpu)
static int handle_smc(struct kvm_vcpu *vcpu)
{
/*
+ * Forward this trapped smc instruction to the virtual EL2 if
+ * the guest has asked for it.
+ */
+ if (forward_smc_trap(vcpu))
+ return 1;
+
+ /*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
* Trap exception, not a Secure Monitor Call exception [...]"
@@ -87,11 +95,19 @@ static int handle_smc(struct kvm_vcpu *vcpu)
}
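
For context, forward_smc_trap() is provided by the NV code in arch/arm64/kvm/emulate-nested.c. A minimal sketch of the forwarding logic, assuming the usual NV helpers (vcpu_has_nv(), is_hyp_ctxt(), kvm_inject_nested_sync()); the exact body in the tree may differ:

	/* Sketch: reinject the SMC trap into the virtual EL2 when the L1
	 * guest hypervisor asked for SMC trapping via HCR_EL2.TSC. */
	static bool forward_smc_trap(struct kvm_vcpu *vcpu)
	{
		if (!vcpu_has_nv(vcpu))
			return false;

		if (!is_hyp_ctxt(vcpu) &&
		    (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_TSC)) {
			kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
			return true;
		}

		return false;
	}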
/*
- * Guest access to FP/ASIMD registers are routed to this handler only
- * when the system doesn't support FP/ASIMD.
+ * This handles the cases where the system does not support FP/ASIMD or when
+ * we are running nested virtualization and the guest hypervisor is trapping
+ * FP/ASIMD accesses by its guest.
+ *
+ * All other handling of guest vs. host FP/ASIMD register state is handled in
+ * fixup_guest_exit().
*/
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
+static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
{
+ if (guest_hyp_fpsimd_traps_enabled(vcpu))
+ return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+ /* This is the case when the system doesn't support FP/ASIMD. */
kvm_inject_undefined(vcpu);
return 1;
}
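
guest_hyp_fpsimd_traps_enabled() comes from asm/kvm_emulate.h. Conceptually it checks whether an NV guest hypervisor has programmed CPTR_EL2 to trap its L2's FP/ASIMD accesses; a simplified sketch, not the in-tree macro (which is shared with the SVE/SME checks and may differ in detail):

	static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
	{
		u64 cptr;

		/* Only relevant when running an L2 under a guest hypervisor */
		if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
			return false;

		cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);

		if (!vcpu_el2_e2h_is_set(vcpu))
			return cptr & CPTR_EL2_TFP;	/* nVHE layout */

		/* VHE layout: FPEN != 0b11 means FP/ASIMD can trap */
		return (cptr & CPACR_ELx_FPEN) != CPACR_ELx_FPEN;
	}

guest_hyp_sve_traps_enabled(), used by handle_sve() further down, is the analogous check on the ZEN/TZ bits.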
@@ -114,8 +130,12 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
u64 esr = kvm_vcpu_get_esr(vcpu);
+ bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
- if (esr & ESR_ELx_WFx_ISS_WFE) {
+ if (guest_hyp_wfx_traps_enabled(vcpu))
+ return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+ if (is_wfe) {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
vcpu->stat.wfe_exit_stat++;
} else {
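
guest_hyp_wfx_traps_enabled() keys off the WFE/WFI trap bits owned by the L1 guest hypervisor. A plausible sketch, assuming the architectural HCR_EL2.TWE/TWI semantics (asm/kvm_emulate.h has the authoritative version):

	static inline bool guest_hyp_wfx_traps_enabled(const struct kvm_vcpu *vcpu)
	{
		u64 esr = kvm_vcpu_get_esr(vcpu);
		bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
		u64 hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);

		if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
			return false;

		/* WFE traps via HCR_EL2.TWE, WFI traps via HCR_EL2.TWI */
		return (is_wfe && (hcr_el2 & HCR_TWE)) ||
		       (!is_wfe && (hcr_el2 & HCR_TWI));
	}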
@@ -168,6 +188,9 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run;
u64 esr = kvm_vcpu_get_esr(vcpu);
+ if (!vcpu->guest_debug && forward_debug_exception(vcpu))
+ return 1;
+
run->exit_reason = KVM_EXIT_DEBUG;
run->debug.arch.hsr = lower_32_bits(esr);
run->debug.arch.hsr_high = upper_32_bits(esr);
@@ -178,7 +201,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
run->debug.arch.far = vcpu->arch.fault.far_el2;
break;
case ESR_ELx_EC_SOFTSTP_LOW:
- vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
+ *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
break;
}
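
forward_debug_exception() is only consulted when userspace is not debugging the guest (!vcpu->guest_debug). A hypothetical sketch of the routing decision, assuming it follows the architectural MDCR_EL2.TDE rule (debug exceptions taken from L2 are routed to the virtual EL2 when L1 set TDE); the in-tree version handles more cases:

	/* Hypothetical sketch: route a debug exception taken from an L2
	 * context to the virtual EL2 when L1 asked via MDCR_EL2.TDE. */
	static bool forward_debug_exception(struct kvm_vcpu *vcpu)
	{
		if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
			return false;

		if (!(__vcpu_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_TDE))
			return false;

		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
		return true;
	}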
@@ -202,24 +225,48 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
*/
static int handle_sve(struct kvm_vcpu *vcpu)
{
+ if (guest_hyp_sve_traps_enabled(vcpu))
+ return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
kvm_inject_undefined(vcpu);
return 1;
}
/*
- * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
- * that we can do is give the guest an UNDEF.
+ * Two possibilities to handle a trapping ptrauth instruction:
+ *
+ * - Guest usage of a ptrauth instruction (which the guest EL1 did not
+ * turn into a NOP). If we get here, it is because we didn't enable
+ * ptrauth for the guest. This results in an UNDEF, as it isn't
+ * supposed to use ptrauth without being told it could.
+ *
+ * - Running an L2 NV guest while L1 has left HCR_EL2.API==0, and for
+ * which we reinject the exception into L1.
+ *
+ * Anything else is an emulation bug (hence the WARN_ON + UNDEF).
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
+ if (!vcpu_has_ptrauth(vcpu)) {
+ kvm_inject_undefined(vcpu);
+ return 1;
+ }
+
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return 1;
+ }
+
+ /* Really shouldn't be here! */
+ WARN_ON_ONCE(1);
kvm_inject_undefined(vcpu);
return 1;
}
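
For reference, vcpu_has_ptrauth() gates on both host capability and what the VMM enabled for this vCPU; it is roughly the following macro from asm/kvm_host.h (reproduced from memory, so treat it as a sketch):

	#define vcpu_has_ptrauth(vcpu)					\
		((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||	\
		  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&	\
		 vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))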
static int kvm_handle_eret(struct kvm_vcpu *vcpu)
{
- if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
+ if (esr_iss_is_eretax(kvm_vcpu_get_esr(vcpu)) &&
+ !vcpu_has_ptrauth(vcpu))
return kvm_handle_ptrauth(vcpu);
/*
@@ -252,6 +299,81 @@ static int handle_svc(struct kvm_vcpu *vcpu)
return 1;
}
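
The kvm_handle_eret() change above relies on esr_iss_is_eretax() to tell a plain ERET from ERETAA/ERETAB. As defined in asm/esr.h (modulo version drift):

	/* ISS.ERET == 1 means ERETAA or ERETAB rather than a plain ERET */
	static inline bool esr_iss_is_eretax(unsigned long esr)
	{
		return esr & ESR_ELx_ERET_ISS_ERET;
	}

So an ERETAx on a vCPU without ptrauth falls through to kvm_handle_ptrauth() and UNDEFs, while a plain ERET (or ERETAx with ptrauth enabled) continues into the ERET emulation.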
+static int kvm_handle_gcs(struct kvm_vcpu *vcpu)
+{
+ /* We don't expect GCS, so treat it with contempt */
+ if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, GCS, IMP))
+ WARN_ON_ONCE(1);
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
+static int handle_other(struct kvm_vcpu *vcpu)
+{
+ bool is_l2 = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+ u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ u64 iss = ESR_ELx_ISS(esr);
+ struct kvm *kvm = vcpu->kvm;
+ bool allowed, fwd = false;
+
+ /*
+ * We only trap for two reasons:
+ *
+ * - the feature is disabled, and the only outcome is to
+ * generate an UNDEF.
+ *
+ * - the feature is enabled, but a NV guest wants to trap the
+ * feature used by its L2 guest. We forward the exception in
+ * this case.
+ *
+ * What we don't expect is to end up here if the guest is
+ * expected to be able to directly use the feature, hence the
+ * WARN_ON below.
+ */
+ switch (iss) {
+ case ESR_ELx_ISS_OTHER_ST64BV:
+ allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
+ if (is_l2)
+ fwd = !(hcrx & HCRX_EL2_EnASR);
+ break;
+ case ESR_ELx_ISS_OTHER_ST64BV0:
+ allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
+ if (is_l2)
+ fwd = !(hcrx & HCRX_EL2_EnAS0);
+ break;
+ case ESR_ELx_ISS_OTHER_LDST64B:
+ allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
+ if (is_l2)
+ fwd = !(hcrx & HCRX_EL2_EnALS);
+ break;
+ case ESR_ELx_ISS_OTHER_TSBCSYNC:
+ allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, TRBE_V1P1);
+ if (is_l2)
+ fwd = (__vcpu_sys_reg(vcpu, HFGITR2_EL2) & HFGITR2_EL2_TSBCSYNC);
+ break;
+ case ESR_ELx_ISS_OTHER_PSBCSYNC:
+ allowed = kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P5);
+ if (is_l2)
+ fwd = (__vcpu_sys_reg(vcpu, HFGITR_EL2) & HFGITR_EL2_PSBCSYNC);
+ break;
+ default:
+ /* Clearly, we're missing something. */
+ WARN_ON_ONCE(1);
+ allowed = false;
+ }
+
+ WARN_ON_ONCE(allowed && !fwd);
+
+ if (allowed && fwd)
+ kvm_inject_nested_sync(vcpu, esr);
+ else
+ kvm_inject_undefined(vcpu);
+
+ return 1;
+}
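
ESR_ELx_ISS() simply masks the 25-bit ISS field out of the ESR, per asm/esr.h:

	#define ESR_ELx_ISS_MASK	(GENMASK(24, 0))
	#define ESR_ELx_ISS(esr)	((esr) & ESR_ELx_ISS_MASK)

The resulting decision matrix is: feature not allowed -> UNDEF; allowed and L1 configured an L2 trap -> reinject into the virtual EL2; allowed with no forwarding reason -> we should never have trapped at all, hence the WARN_ON_ONCE before the injection.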
+
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
@@ -261,6 +383,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store,
[ESR_ELx_EC_CP10_ID] = kvm_handle_cp10_id,
[ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64,
+ [ESR_ELx_EC_OTHER] = handle_other,
[ESR_ELx_EC_HVC32] = handle_hvc,
[ESR_ELx_EC_SMC32] = handle_smc,
[ESR_ELx_EC_HVC64] = handle_hvc,
@@ -271,13 +394,15 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_ERET] = kvm_handle_eret,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_DABT_CUR] = kvm_handle_vncr_abort,
[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
[ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
- [ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd,
+ [ESR_ELx_EC_FP_ASIMD] = kvm_handle_fpasimd,
[ESR_ELx_EC_PAC] = kvm_handle_ptrauth,
+ [ESR_ELx_EC_GCS] = kvm_handle_gcs,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
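
The dispatcher that consumes this table is unchanged by the diff; for reference, it is essentially:

	static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
	{
		u64 esr = kvm_vcpu_get_esr(vcpu);
		u8 esr_ec = ESR_ELx_EC(esr);

		/* Every EC has an entry thanks to the [0 ... ESR_ELx_EC_MAX]
		 * default initializer above. */
		return arm_exit_handlers[esr_ec];
	}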
@@ -383,6 +508,20 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
+static void print_nvhe_hyp_panic(const char *name, u64 panic_addr)
+{
+ kvm_err("nVHE hyp %s at: [<%016llx>] %pB!\n", name, panic_addr,
+ (void *)(panic_addr + kaslr_offset()));
+}
+
+static void kvm_nvhe_report_cfi_failure(u64 panic_addr)
+{
+ print_nvhe_hyp_panic("CFI failure", panic_addr);
+
+ if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
+ kvm_err(" (CONFIG_CFI_PERMISSIVE ignored for hyp failures)\n");
+}
+
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
u64 elr_virt, u64 elr_phys,
u64 par, uintptr_t vcpu,
@@ -395,7 +534,7 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
kvm_err("Invalid host exception to nVHE hyp!\n");
} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
- (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
+ esr_brk_comment(esr) == BUG_BRK_IMM) {
const char *file = NULL;
unsigned int line = 0;
@@ -411,11 +550,16 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
if (file)
kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
else
- kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
- (void *)(panic_addr + kaslr_offset()));
+ print_nvhe_hyp_panic("BUG", panic_addr);
+ } else if (IS_ENABLED(CONFIG_CFI_CLANG) && esr_is_cfi_brk(esr)) {
+ kvm_nvhe_report_cfi_failure(panic_addr);
+ } else if (IS_ENABLED(CONFIG_UBSAN_KVM_EL2) &&
+ ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
+ esr_is_ubsan_brk(esr)) {
+ print_nvhe_hyp_panic(report_ubsan_failure(esr & UBSAN_BRK_MASK),
+ panic_addr);
} else {
- kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
- (void *)(panic_addr + kaslr_offset()));
+ print_nvhe_hyp_panic("panic", panic_addr);
}
/* Dump the nVHE hypervisor backtrace */
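
The BRK-comment helpers used in the panic path live in asm/esr.h. esr_brk_comment() and esr_is_cfi_brk() are reproduced below from memory; esr_is_ubsan_brk() is a sketch of what the CONFIG_UBSAN_KVM_EL2 series plausibly added, with UBSAN_BRK_IMM and UBSAN_BRK_MASK as assumed names:

	static inline unsigned long esr_brk_comment(unsigned long esr)
	{
		return esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
	}

	static inline bool esr_is_cfi_brk(unsigned long esr)
	{
		return ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		       (esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE;
	}

	/* Sketch: UBSAN BRKs carry the check type in the low bits of the
	 * comment; the immediate base and mask names are assumptions. */
	static inline bool esr_is_ubsan_brk(unsigned long esr)
	{
		return (esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM;
	}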