author    Mark Rutland <mark.rutland@arm.com>        2020-02-21 14:50:22 +0000
committer Catalin Marinas <catalin.marinas@arm.com>  2020-03-13 17:34:28 +0000
commit    b5475d8caedb71476f999a858ea3f8c24c5f9e50
tree      7a46098e055c0f1a206ff4e5bea0632943ca9e7c /arch/arm64/kvm/hyp/sysreg-sr.c
parent    arm64: cpufeature: add cpus_have_final_cap()
arm64: kvm: hyp: use cpus_have_final_cap()
The KVM hyp code is only run after system capabilities have been
finalized, and thus all const cap checks have been patched. This is
noted in __cpu_init_hyp_mode(), where we BUG() if called too early:

| /*
|  * Call initialization code, and switch to the full blown HYP code.
|  * If the cpucaps haven't been finalized yet, something has gone very
|  * wrong, and hyp will crash and burn when it uses any
|  * cpus_have_const_cap() wrapper.
|  */

Given this, the hyp code can use cpus_have_final_cap() and avoid
generating code to check the cpu_hwcaps array, which would be unsafe to
run in hyp context.

This patch migrates the KVM hyp code to cpus_have_final_cap(), avoiding
this redundant code generation and making it possible to detect if we
accidentally invoke this code too early. In the latter case, the BUG()
in cpus_have_final_cap() will cause a hyp panic.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
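For context, the two helpers differ only in what happens if the check
runs before capabilities are finalized. The following is a simplified
sketch of that difference, paraphrased from the helpers that
arch/arm64/include/asm/cpufeature.h gained in the parent commit; it is
illustrative, not the verbatim kernel source:

/* Sketch only: simplified from arch/arm64/include/asm/cpufeature.h. */
static __always_inline bool cpus_have_const_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num); /* patched branch */
	else
		return cpus_have_cap(num); /* reads the cpu_hwcaps array */
}

static __always_inline bool cpus_have_final_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num); /* patched branch */
	else
		BUG(); /* caps not finalized: caller is too early */
}

The cpus_have_cap() fallback is exactly the cpu_hwcaps array access the
message above describes as unsafe in hyp context; cpus_have_final_cap()
replaces that fallback with BUG(), so a premature call from hyp
surfaces as a hyp panic rather than a stray memory access.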
Diffstat (limited to 'arch/arm64/kvm/hyp/sysreg-sr.c')
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 7672a978926c..75b1925763f1 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -71,7 +71,7 @@ static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 	ctxt->gp_regs.regs.pc		= read_sysreg_el2(SYS_ELR);
 	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(SYS_SPSR);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
 }
 
@@ -118,7 +118,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[MPIDR_EL1],	vmpidr_el2);
 	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);
 
-	if (!cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
+	if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
 		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
 	} else if (!ctxt->__hyp_running_vcpu) {
@@ -149,7 +149,7 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
 	write_sysreg(ctxt->sys_regs[TPIDR_EL1],	tpidr_el1);
 
-	if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
 	    ctxt->__hyp_running_vcpu) {
 		/*
 		 * Must only be done for host registers, hence the context
@@ -194,7 +194,7 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 	write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
 	write_sysreg_el2(pstate, SYS_SPSR);
 
-	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
 		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
 }