author     Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>    2017-07-06 15:50:47 -0500
committer  Radim Krčmář <rkrcmar@redhat.com>                        2017-07-12 22:38:30 +0200
commit     89c8a4984fc98e625517bfe5083342d77ee35811 (patch)
tree       e8a974ab962400b2a851baed04a3b750f66f3ca0 /arch
parent     KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition (diff)
KVM: SVM: Enable Virtual VMLOAD VMSAVE feature
Enable the Virtual VMLOAD VMSAVE feature. This is done by setting bit 1
at position B8h in the vmcb.

The processor must have nested paging enabled, be in 64-bit mode and
have support for the Virtual VMLOAD VMSAVE feature for the bit to be
set in the vmcb.

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
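For context, the CPU advertises this capability through CPUID Fn8000_000Ah
(SVM feature identification), EDX bit 15 (V_VMSAVE_VMLOAD), which is the
bit the kernel's X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE flag maps to. A minimal
userspace sketch to probe that bit (illustrative only, not part of the
patch):

/* Probe the Virtual VMLOAD/VMSAVE CPUID bit from userspace. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Fn8000_000Ah: SVM feature identification. */
	if (!__get_cpuid(0x8000000A, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000000A not available (no SVM?)");
		return 1;
	}

	/* EDX bit 15: V_VMSAVE_VMLOAD (Virtual VMLOAD/VMSAVE). */
	printf("Virtual VMLOAD VMSAVE: %ssupported\n",
	       (edx & (1u << 15)) ? "" : "not ");
	return 0;
}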
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/svm.h |  1 +
-rw-r--r--  arch/x86/kvm/svm.c         | 24 ++++++++++++++++++++++++
2 files changed, 25 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 74d139352491..58fffe79e417 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -120,6 +120,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
 
 #define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
 
 #define SVM_INTERRUPT_SHADOW_MASK 1
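BIT_ULL(n) expands to (1ULL << (n)), so the new mask selects bit 1 of the
64-bit virt_ext field at VMCB offset B8h, next to the LBR virtualization
control at bit 0. A standalone illustration of the mask arithmetic (a
sketch, not kernel code):

/* Mirror the two virt_ext control masks outside the kernel. */
#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))
#define LBR_CTL_ENABLE_MASK               BIT_ULL(0)
#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)

int main(void)
{
	uint64_t virt_ext = 0;	/* stands in for the field at offset B8h */

	virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	printf("LBR mask %#llx, vVMLOAD/VMSAVE mask %#llx, virt_ext %#llx\n",
	       (unsigned long long)LBR_CTL_ENABLE_MASK,
	       (unsigned long long)VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK,
	       (unsigned long long)virt_ext);	/* virt_ext = 0x2 */
	return 0;
}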
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9a09f89145f2..4c98d362e3e4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -277,6 +277,10 @@ static int avic;
 module_param(avic, int, S_IRUGO);
 #endif
 
+/* enable/disable Virtual VMLOAD VMSAVE */
+static int vls = true;
+module_param(vls, int, 0444);
+
 /* AVIC VM ID bit masks and lock */
 static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
 static DEFINE_SPINLOCK(avic_vm_id_lock);
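Because the parameter is registered with mode 0444 it is world-readable
but not writable at runtime: it can be overridden only at load time
(e.g. modprobe kvm-amd vls=0) and inspected afterwards through sysfs.
A small sketch that reads it back (the path assumes the standard
/sys/module/<name>/parameters layout):

/* Read the vls module parameter back from sysfs. */
#include <stdio.h>

int main(void)
{
	char buf[16];
	FILE *f = fopen("/sys/module/kvm_amd/parameters/vls", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		fprintf(stderr, "vls not readable (is kvm-amd loaded?)\n");
		return 1;
	}
	printf("vls = %s", buf);	/* "1" when the feature is active */
	fclose(f);
	return 0;
}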
@@ -1093,6 +1097,16 @@ static __init int svm_hardware_setup(void)
 		}
 	}
 
+	if (vls) {
+		if (!npt_enabled ||
+		    !boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
+		    !IS_ENABLED(CONFIG_X86_64)) {
+			vls = false;
+		} else {
+			pr_info("Virtual VMLOAD VMSAVE supported\n");
+		}
+	}
+
 	return 0;
 
 err:
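Condensed, the gating keeps vls enabled only when nested paging is on,
the CPU reports the feature, and the kernel is 64-bit; otherwise it falls
back silently to intercepting the instructions. A standalone sketch of
the same predicate (the boolean stand-ins for npt_enabled, the CPUID flag
and CONFIG_X86_64 are hypothetical):

/* Illustrate the three preconditions that keep vls enabled. */
#include <stdbool.h>
#include <stdio.h>

static bool npt_enabled = true;			/* nested paging on */
static bool has_virtual_vmload_vmsave = true;	/* CPUID 8000_000Ah EDX[15] */
static bool is_64bit = true;			/* CONFIG_X86_64 */

int main(void)
{
	bool vls = true;

	/* Same shape as the hunk: all three must hold, else fall back. */
	if (!npt_enabled || !has_virtual_vmload_vmsave || !is_64bit)
		vls = false;
	else
		puts("Virtual VMLOAD VMSAVE supported");

	return vls ? 0 : 1;
}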
@@ -1280,6 +1294,16 @@ static void init_vmcb(struct vcpu_svm *svm)
 	if (avic)
 		avic_init_vmcb(svm);
 
+	/*
+	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
+	 * in VMCB and clear intercepts to avoid #VMEXIT.
+	 */
+	if (vls) {
+		clr_intercept(svm, INTERCEPT_VMLOAD);
+		clr_intercept(svm, INTERCEPT_VMSAVE);
+		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+	}
+
 	mark_all_dirty(svm->vmcb);
 
 	enable_gif(svm);
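Taken together, the block clears the two intercept bits so guest VMLOAD
and VMSAVE no longer force a #VMEXIT, and sets the virt_ext enable bit so
hardware performs the load/save against the guest's own state. A sketch
of the same manipulation on a stand-in control area (the struct layout
and intercept bit values here are hypothetical, far smaller than the real
vmcb_control_area):

/* Model clearing the intercepts and enabling virt_ext bit 1. */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)                        (1ULL << (n))
#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)

/* Hypothetical intercept bits; the real ones live in vmcb->control. */
#define INTERCEPT_VMLOAD (1u << 0)
#define INTERCEPT_VMSAVE (1u << 1)

struct mock_control {
	uint32_t intercept;	/* which instructions trigger #VMEXIT */
	uint64_t virt_ext;	/* 64-bit field at VMCB offset B8h */
};

int main(void)
{
	struct mock_control c = {
		.intercept = INTERCEPT_VMLOAD | INTERCEPT_VMSAVE,
	};

	/* Mirror the patch: stop intercepting, enable virtualization. */
	c.intercept &= ~(INTERCEPT_VMLOAD | INTERCEPT_VMSAVE);
	c.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	printf("intercept=%#x virt_ext=%#llx\n",
	       c.intercept, (unsigned long long)c.virt_ext);
	return 0;
}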