author    Dave Martin <Dave.Martin@arm.com>    2018-04-20 16:20:43 +0100
committer Marc Zyngier <marc.zyngier@arm.com>  2018-05-25 12:28:29 +0100
commit    85acda3b4a27ee3e20c54783a44f307b51912c2b (patch)
tree      0a2f7213311009b9a244cb5d6bc862c14ce2a3cf /arch/arm64/kvm
parent    arm64/sve: Move sve_pffr() to fpsimd.h and make inline (diff)
KVM: arm64: Save host SVE context as appropriate
This patch adds SVE context saving to the hyp FPSIMD context switch path. This means that it is no longer necessary to save the host SVE state in advance of entering the guest when SVE is in use.

In order to avoid adding pointless complexity to the code, VHE is assumed if SVE is in use. VHE is an architectural prerequisite for SVE, so there is no good reason to turn CONFIG_ARM64_VHE off in kernels that support both SVE and KVM.

Historically, software models exist that can expose the architecturally invalid configuration of SVE without VHE, so if this situation is detected at kvm_init() time then KVM will be disabled.

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
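The kvm_init()-time check described above lands outside arch/arm64/kvm, so it does not appear in the diffstat below. A minimal sketch of what such a check in kvm_arch_init() looks like (the message text and exact placement are assumptions, not taken from this diff):

	/* Sketch only: refuse to initialise KVM on an architecturally
	 * invalid SVE-without-VHE system, as described in the commit message. */
	if (!IS_ENABLED(CONFIG_ARM64_VHE) && system_supports_sve()) {
		kvm_info("SVE system without VHE unsupported, not initialising KVM\n");
		return -ENODEV;
	}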
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--    arch/arm64/kvm/fpsimd.c         1
-rw-r--r--    arch/arm64/kvm/hyp/switch.c    20
2 files changed, 19 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 365933a98a7c..dc6ecfa5a2d2 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -59,7 +59,6 @@ error:
  */
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
-	BUG_ON(system_supports_sve());
 	BUG_ON(!current->mm);
 
 	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 118f3002b9ce..a6a8c7d9157d 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -21,6 +21,7 @@
 #include <kvm/arm_psci.h>
 
+#include <asm/cpufeature.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
@@ -28,6 +29,7 @@
 #include <asm/kvm_mmu.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
+#include <asm/processor.h>
 #include <asm/thread_info.h>
 
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
@@ -329,6 +331,8 @@ static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 void __hyp_text __hyp_switch_fpsimd(u64 esr __always_unused,
 				    struct kvm_vcpu *vcpu)
 {
+	struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
+
 	if (has_vhe())
 		write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
 			     cpacr_el1);
@@ -339,7 +343,21 @@ void __hyp_text __hyp_switch_fpsimd(u64 esr __always_unused,
 	isb();
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
-		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
+		/*
+		 * In the SVE case, VHE is assumed: it is enforced by
+		 * Kconfig and kvm_arch_init().
+		 */
+		if (system_supports_sve() &&
+		    (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
+			struct thread_struct *thread = container_of(
+				host_fpsimd,
+				struct thread_struct, uw.fpsimd_state);
+
+			sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
+		} else {
+			__fpsimd_save_state(host_fpsimd);
+		}
+
 		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 	}
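For reference, the container_of() above works because vcpu->arch.host_fpsimd_state points at the user_fpsimd_state embedded (via uw) in the host task's thread_struct, so the enclosing thread_struct can be recovered and passed to sve_pffr() to locate the task's full SVE register storage. A rough sketch of sve_pffr(), made inline by the parent commit referenced in the header (treat the exact body and field names as approximate, not quoted from this patch):

	static inline void *sve_pffr(struct thread_struct *thread)
	{
		/* Base of the task's SVE register buffer plus the FFR offset
		 * computed from its current vector length. */
		return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
	}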