Diffstat (limited to 'arch/arm64/kvm/hyp/include/hyp/switch.h')
 arch/arm64/kvm/hyp/include/hyp/switch.h | 107
 1 file changed, 54 insertions(+), 53 deletions(-)
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 6c1f51f25eb3..e4a2f295a394 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -30,8 +30,6 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
-extern const char __hyp_panic_string[];
-
extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;
@@ -160,18 +158,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
return true;
}
-static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
+static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
- u8 ec;
- u64 esr;
u64 hpfar, far;
- esr = vcpu->arch.fault.esr_el2;
- ec = ESR_ELx_EC(esr);
-
- if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
- return true;
-
far = read_sysreg_el2(SYS_FAR);
/*
@@ -194,33 +184,59 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
hpfar = read_sysreg(hpfar_el2);
}
- vcpu->arch.fault.far_el2 = far;
- vcpu->arch.fault.hpfar_el2 = hpfar;
+ fault->far_el2 = far;
+ fault->hpfar_el2 = hpfar;
return true;
}
+static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+ u8 ec;
+ u64 esr;
+
+ esr = vcpu->arch.fault.esr_el2;
+ ec = ESR_ELx_EC(esr);
+
+ if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
+ return true;
+
+ return __get_fault_info(esr, &vcpu->arch.fault);
+}
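The split above lets callers that already hold an ESR value populate a struct kvm_vcpu_fault_info directly, without going through a struct kvm_vcpu. A purely illustrative caller sketch (hyp_handle_abort_sketch is not part of this patch):

static bool hyp_handle_abort_sketch(u64 esr)
{
	struct kvm_vcpu_fault_info fault;

	/* Resolve FAR/HPFAR for this ESR; bail out if the walk failed. */
	if (!__get_fault_info(esr, &fault))
		return false;

	/* fault.far_el2 and fault.hpfar_el2 are now valid. */
	return true;
}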
+
+static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
+{
+ struct thread_struct *thread;
+
+ thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
+ uw.fpsimd_state);
+
+ __sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
+}
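__hyp_sve_save_host() steps from the host_fpsimd_state pointer back to its enclosing thread_struct with container_of(). A minimal, self-contained illustration of that pattern (the struct names here are invented for the example):

#include <stddef.h>

/* Simplified container_of: subtract the member offset to find the container. */
#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { long tag; struct inner in; };

/* Given only a pointer to o->in, recover o itself. */
static struct outer *outer_of(struct inner *p)
{
	return container_of_sketch(p, struct outer, in);
}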
+
+static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
+{
+ sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+ __sve_restore_state(vcpu_sve_pffr(vcpu),
+ &vcpu->arch.ctxt.fp_regs.fpsr);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+}
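__hyp_sve_restore_guest() programs ZCR_EL2 from the guest's maximum vector quadword count before loading the registers. As a reminder of the encoding, two hedged helpers (not from the patch; the kernel's own sve_vq_from_vl() covers the first conversion):

/* SVE sizes: VL is the vector length in bytes, VQ counts 128-bit quadwords. */
static inline unsigned int sketch_vq_from_vl(unsigned int vl_bytes)
{
	return vl_bytes / 16;	/* e.g. a 256-bit vector: VL 32 -> VQ 2 */
}

/* ZCR_ELx.LEN holds VQ - 1, which is what sve_cond_update_zcr_vq() is passed. */
static inline unsigned int sketch_zcr_len(unsigned int vq)
{
	return vq - 1;
}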
+
/* Check for an FPSIMD/SVE trap and handle as appropriate */
static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
- bool vhe, sve_guest, sve_host;
+ bool sve_guest, sve_host;
u8 esr_ec;
+ u64 reg;
if (!system_supports_fpsimd())
return false;
- /*
- * Currently system_supports_sve() currently implies has_vhe(),
- * so the check is redundant. However, has_vhe() can be determined
- * statically and helps the compiler remove dead code.
- */
- if (has_vhe() && system_supports_sve()) {
+ if (system_supports_sve()) {
sve_guest = vcpu_has_sve(vcpu);
sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
- vhe = true;
} else {
sve_guest = false;
sve_host = false;
- vhe = has_vhe();
}
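Dropping the vhe local relies on has_vhe() being compile-time constant within each hyp object, so the compiler can still discard the untaken branch. A rough sketch of the idea, not the kernel's actual definition:

/*
 * Sketch only: in the nVHE hyp object (__KVM_NVHE_HYPERVISOR__) the
 * answer is constant-false, so `if (has_vhe())` folds away. On the
 * host/VHE side the real helper checks a finalized CPU capability.
 */
#ifdef __KVM_NVHE_HYPERVISOR__
static inline bool has_vhe_sketch(void) { return false; }
#else
static inline bool has_vhe_sketch(void) { return true; }
#endif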
esr_ec = kvm_vcpu_trap_get_class(vcpu);
@@ -229,53 +245,38 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
return false;
/* Don't handle SVE traps for non-SVE vcpus here: */
- if (!sve_guest)
- if (esr_ec != ESR_ELx_EC_FP_ASIMD)
- return false;
+ if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+ return false;
/* Valid trap. Switch the context: */
-
- if (vhe) {
- u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
-
+ if (has_vhe()) {
+ reg = CPACR_EL1_FPEN;
if (sve_guest)
reg |= CPACR_EL1_ZEN;
- write_sysreg(reg, cpacr_el1);
+ sysreg_clear_set(cpacr_el1, 0, reg);
} else {
- write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
- cptr_el2);
- }
+ reg = CPTR_EL2_TFP;
+ if (sve_guest)
+ reg |= CPTR_EL2_TZ;
+ sysreg_clear_set(cptr_el2, reg, 0);
+ }
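sysreg_clear_set() replaces the open-coded read/modify/write sequences on CPACR_EL1 and CPTR_EL2. Note the trap-bit polarity differs: CPACR_EL1 enables access by setting FPEN/ZEN, while CPTR_EL2 traps by setting TFP/TZ, hence the set on VHE and the clear on nVHE. Roughly, the helper behaves like the sketch below (the real macro lives in asm/sysreg.h and likewise skips the write when nothing changes):

#define sysreg_clear_set_sketch(sysreg, clear, set) do {		\
	u64 __scs_val = read_sysreg(sysreg);				\
	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
	if (__scs_new != __scs_val)	/* avoid a redundant write */	\
		write_sysreg(__scs_new, sysreg);			\
} while (0)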
isb();
if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
- /*
- * In the SVE case, VHE is assumed: it is enforced by
- * Kconfig and kvm_arch_init().
- */
- if (sve_host) {
- struct thread_struct *thread = container_of(
- vcpu->arch.host_fpsimd_state,
- struct thread_struct, uw.fpsimd_state);
-
- sve_save_state(sve_pffr(thread),
- &vcpu->arch.host_fpsimd_state->fpsr);
- } else {
+ if (sve_host)
+ __hyp_sve_save_host(vcpu);
+ else
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
- }
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
}
- if (sve_guest) {
- sve_load_state(vcpu_sve_pffr(vcpu),
- &vcpu->arch.ctxt.fp_regs.fpsr,
- sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
- write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
- } else {
+ if (sve_guest)
+ __hyp_sve_restore_guest(vcpu);
+ else
__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
- }
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))