Diffstat (limited to 'tools/testing/selftests/kvm/x86_64/evmcs_test.c')
 tools/testing/selftests/kvm/x86_64/evmcs_test.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 70 insertions(+), 18 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index ca22ee6d19cb..63096cea26c6 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -18,6 +18,28 @@
#include "vmx.h"
#define VCPU_ID 5
+#define NMI_VECTOR 2
+
+static int ud_count;
+
+void enable_x2apic(void)
+{
+ uint32_t spiv_reg = APIC_BASE_MSR + (APIC_SPIV >> 4);
+
+ wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
+ MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
+ wrmsr(spiv_reg, rdmsr(spiv_reg) | APIC_SPIV_APIC_ENABLED);
+}
+
+static void guest_ud_handler(struct ex_regs *regs)
+{
+ ud_count++;
+ regs->rip += 3; /* VMLAUNCH */
+}
+
+static void guest_nmi_handler(struct ex_regs *regs)
+{
+}
void l2_guest_code(void)
{
@@ -25,15 +47,23 @@ void l2_guest_code(void)
GUEST_SYNC(8);
+ /* Forced exit to L1 upon restore */
+ GUEST_SYNC(9);
+
/* Done, exit to L1 and never come back. */
vmcall();
}
-void l1_guest_code(struct vmx_pages *vmx_pages)
+void guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ enable_x2apic();
+
+ GUEST_SYNC(1);
+ GUEST_SYNC(2);
+
enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
GUEST_ASSERT(vmx_pages->vmcs_gpa);
@@ -55,27 +85,40 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
current_evmcs->revision_id = EVMCS_VERSION;
GUEST_SYNC(6);
+ current_evmcs->pin_based_vm_exec_control |=
+ PIN_BASED_NMI_EXITING;
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
- GUEST_SYNC(9);
+
+ /*
+ * NMI forces L2->L1 exit, resuming L2 and hoping that EVMCS is
+ * up-to-date (RIP points where it should and not at the beginning
+ * of l2_guest_code()). GUEST_SYNC(9) checks that.
+ */
GUEST_ASSERT(!vmresume());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
GUEST_SYNC(10);
+
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ GUEST_SYNC(11);
+
+ /* Try enlightened vmptrld with an incorrect GPA */
+ evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
+ GUEST_ASSERT(vmlaunch());
+ GUEST_ASSERT(ud_count == 1);
+ GUEST_DONE();
}
-void guest_code(struct vmx_pages *vmx_pages)
+void inject_nmi(struct kvm_vm *vm)
{
- GUEST_SYNC(1);
- GUEST_SYNC(2);
+ struct kvm_vcpu_events events;
- if (vmx_pages)
- l1_guest_code(vmx_pages);
+ vcpu_events_get(vm, VCPU_ID, &events);
- GUEST_DONE();
+ events.nmi.pending = 1;
+ events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
- /* Try enlightened vmptrld with an incorrect GPA */
- evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
- GUEST_ASSERT(vmlaunch());
+ vcpu_events_set(vm, VCPU_ID, &events);
}
int main(int argc, char *argv[])
@@ -109,6 +152,13 @@ int main(int argc, char *argv[])
vcpu_alloc_vmx(vm, &vmx_pages_gva);
vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
+ vm_handle_exception(vm, NMI_VECTOR, guest_nmi_handler);
+
+ pr_info("Running L1 which uses EVMCS to run L2\n");
+
for (stage = 1;; stage++) {
_vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -124,7 +174,7 @@ int main(int argc, char *argv[])
case UCALL_SYNC:
break;
case UCALL_DONE:
- goto part1_done;
+ goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
@@ -154,12 +204,14 @@ int main(int argc, char *argv[])
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
- }
-part1_done:
- _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
- "Unexpected successful VMEnter with invalid eVMCS pointer!");
+ /* Force immediate L2->L1 exit before resuming */
+ if (stage == 8) {
+ pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
+ inject_nmi(vm);
+ }
+ }
+done:
kvm_vm_free(vm);
}
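
The inject_nmi() helper added above uses the selftest wrappers
vcpu_events_get()/vcpu_events_set(), which are thin veneers over the
KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS vCPU ioctls. A minimal
standalone sketch of the same read-modify-write against a raw vCPU fd
(vcpu_fd is an assumed, already-open vCPU file descriptor and is not
part of this patch):

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Mark an NMI as pending without disturbing other event state. */
static void inject_nmi_raw(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	/* Seed the struct with the vCPU's current event state. */
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_GET_VCPU_EVENTS");

	events.nmi.pending = 1;
	/* nmi.pending is consumed only when this flag is set. */
	events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;

	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		err(1, "KVM_SET_VCPU_EVENTS");
}

The get-before-set is deliberate: KVM_SET_VCPU_EVENTS always consumes
the exception and interrupt fields from the struct, while nmi.pending is
honored only when KVM_VCPUEVENT_VALID_NMI_PENDING is set in flags, so
seeding the struct from KVM_GET_VCPU_EVENTS keeps in-flight event state
intact.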