Diffstat (limited to 'tools/testing/selftests/kvm/x86_64')
-rw-r--r--  tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c         117
-rw-r--r--  tools/testing/selftests/kvm/x86_64/debug_regs.c                  202
-rw-r--r--  tools/testing/selftests/kvm/x86_64/evmcs_test.c                  166
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c                190
-rw-r--r--  tools/testing/selftests/kvm/x86_64/mmio_warning_test.c           126
-rw-r--r--  tools/testing/selftests/kvm/x86_64/platform_info_test.c          107
-rw-r--r--  tools/testing/selftests/kvm/x86_64/set_sregs_test.c               52
-rw-r--r--  tools/testing/selftests/kvm/x86_64/smm_test.c                    164
-rw-r--r--  tools/testing/selftests/kvm/x86_64/state_test.c                  233
-rw-r--r--  tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c              77
-rw-r--r--  tools/testing/selftests/kvm/x86_64/sync_regs_test.c              243
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c  87
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c          157
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c   259
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c   274
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c         168
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xss_msr_test.c                 76
17 files changed, 0 insertions, 2698 deletions
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
deleted file mode 100644
index 140e91901582..000000000000
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ /dev/null
@@ -1,117 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * CR4 and CPUID sync test
- *
- * Copyright 2018, Red Hat, Inc. and/or its affiliates.
- *
- * Author:
- * Wei Huang <wei@redhat.com>
- */
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-
-#include "kvm_util.h"
-#include "processor.h"
-
-#define X86_FEATURE_XSAVE (1<<26)
-#define X86_FEATURE_OSXSAVE (1<<27)
-#define VCPU_ID 1
-
-static inline bool cr4_cpuid_is_sync(void)
-{
- int func, subfunc;
- uint32_t eax, ebx, ecx, edx;
- uint64_t cr4;
-
- func = 0x1;
- subfunc = 0x0;
- __asm__ __volatile__("cpuid"
- : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
- : "a"(func), "c"(subfunc));
-
- cr4 = get_cr4();
-
- return (!!(ecx & X86_FEATURE_OSXSAVE)) == (!!(cr4 & X86_CR4_OSXSAVE));
-}
-
-static void guest_code(void)
-{
- uint64_t cr4;
-
- /* turn on CR4.OSXSAVE */
- cr4 = get_cr4();
- cr4 |= X86_CR4_OSXSAVE;
- set_cr4(cr4);
-
- /* verify CR4.OSXSAVE == CPUID.OSXSAVE */
- GUEST_ASSERT(cr4_cpuid_is_sync());
-
- /* notify hypervisor to change CR4 */
- GUEST_SYNC(0);
-
- /* check again */
- GUEST_ASSERT(cr4_cpuid_is_sync());
-
- GUEST_DONE();
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_run *run;
- struct kvm_vm *vm;
- struct kvm_sregs sregs;
- struct kvm_cpuid_entry2 *entry;
- struct ucall uc;
- int rc;
-
- entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & X86_FEATURE_XSAVE)) {
- print_skip("XSAVE feature not supported");
- return 0;
- }
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- run = vcpu_state(vm, VCPU_ID);
-
- while (1) {
- rc = _vcpu_run(vm, VCPU_ID);
-
- TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_SYNC:
- /* emulate hypervisor clearing CR4.OSXSAVE */
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
- sregs.cr4 &= ~X86_CR4_OSXSAVE;
- vcpu_sregs_set(vm, VCPU_ID, &sregs);
- break;
- case UCALL_ABORT:
- TEST_FAIL("Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
- }
-
- kvm_vm_free(vm);
-
-done:
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
deleted file mode 100644
index 8162c58a1234..000000000000
--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KVM guest debug register tests
- *
- * Copyright (C) 2020, Red Hat, Inc.
- */
-#include <stdio.h>
-#include <string.h>
-#include "kvm_util.h"
-#include "processor.h"
-
-#define VCPU_ID 0
-
-#define DR6_BD (1 << 13)
-#define DR7_GD (1 << 13)
-
-/* For testing data access debug BP */
-uint32_t guest_value;
-
-extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start;
-
-static void guest_code(void)
-{
- /*
- * Software BP tests.
- *
- * NOTE: sw_bp need to be before the cmd here, because int3 is an
- * exception rather than a normal trap for KVM_SET_GUEST_DEBUG (we
- * capture it using the vcpu exception bitmap).
- */
- asm volatile("sw_bp: int3");
-
- /* Hardware instruction BP test */
- asm volatile("hw_bp: nop");
-
- /* Hardware data BP test */
- asm volatile("mov $1234,%%rax;\n\t"
- "mov %%rax,%0;\n\t write_data:"
- : "=m" (guest_value) : : "rax");
-
- /* Single step test, covers 2 basic instructions and 2 emulated */
- asm volatile("ss_start: "
- "xor %%rax,%%rax\n\t"
- "cpuid\n\t"
- "movl $0x1a0,%%ecx\n\t"
- "rdmsr\n\t"
- : : : "rax", "ecx");
-
- /* DR6.BD test */
- asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
- GUEST_DONE();
-}
-
-#define CLEAR_DEBUG() memset(&debug, 0, sizeof(debug))
-#define APPLY_DEBUG() vcpu_set_guest_debug(vm, VCPU_ID, &debug)
-#define CAST_TO_RIP(v) ((unsigned long long)&(v))
-#define SET_RIP(v) do { \
- vcpu_regs_get(vm, VCPU_ID, &regs); \
- regs.rip = (v); \
- vcpu_regs_set(vm, VCPU_ID, &regs); \
- } while (0)
-#define MOVE_RIP(v) SET_RIP(regs.rip + (v));
-
-int main(void)
-{
- struct kvm_guest_debug debug;
- unsigned long long target_dr6, target_rip;
- struct kvm_regs regs;
- struct kvm_run *run;
- struct kvm_vm *vm;
- struct ucall uc;
- uint64_t cmd;
- int i;
- /* Instruction lengths starting at ss_start */
- int ss_size[4] = {
- 3, /* xor */
- 2, /* cpuid */
- 5, /* mov */
- 2, /* rdmsr */
- };
-
- if (!kvm_check_cap(KVM_CAP_SET_GUEST_DEBUG)) {
- print_skip("KVM_CAP_SET_GUEST_DEBUG not supported");
- return 0;
- }
-
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- run = vcpu_state(vm, VCPU_ID);
-
- /* Test software BPs - int3 */
- CLEAR_DEBUG();
- debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
- run->debug.arch.exception == BP_VECTOR &&
- run->debug.arch.pc == CAST_TO_RIP(sw_bp),
- "INT3: exit %d exception %d rip 0x%llx (should be 0x%llx)",
- run->exit_reason, run->debug.arch.exception,
- run->debug.arch.pc, CAST_TO_RIP(sw_bp));
- MOVE_RIP(1);
-
- /* Test instruction HW BP over DR[0-3] */
- for (i = 0; i < 4; i++) {
- CLEAR_DEBUG();
- debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
- debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
- debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
- target_dr6 = 0xffff0ff0 | (1UL << i);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
- run->debug.arch.exception == DB_VECTOR &&
- run->debug.arch.pc == CAST_TO_RIP(hw_bp) &&
- run->debug.arch.dr6 == target_dr6,
- "INS_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
- "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
- i, run->exit_reason, run->debug.arch.exception,
- run->debug.arch.pc, CAST_TO_RIP(hw_bp),
- run->debug.arch.dr6, target_dr6);
- }
- /* Skip "nop" */
- MOVE_RIP(1);
-
- /* Test data access HW BP over DR[0-3] */
- for (i = 0; i < 4; i++) {
- CLEAR_DEBUG();
- debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
- debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
- debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
- (0x000d0000UL << (4*i));
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
- target_dr6 = 0xffff0ff0 | (1UL << i);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
- run->debug.arch.exception == DB_VECTOR &&
- run->debug.arch.pc == CAST_TO_RIP(write_data) &&
- run->debug.arch.dr6 == target_dr6,
- "DATA_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
- "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
- i, run->exit_reason, run->debug.arch.exception,
- run->debug.arch.pc, CAST_TO_RIP(write_data),
- run->debug.arch.dr6, target_dr6);
- /* Rollback the 4-bytes "mov" */
- MOVE_RIP(-7);
- }
- /* Skip the 4-bytes "mov" */
- MOVE_RIP(7);
-
- /* Test single step */
- target_rip = CAST_TO_RIP(ss_start);
- target_dr6 = 0xffff4ff0ULL;
- vcpu_regs_get(vm, VCPU_ID, &regs);
- for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
- target_rip += ss_size[i];
- CLEAR_DEBUG();
- debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
- debug.arch.debugreg[7] = 0x00000400;
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
- run->debug.arch.exception == DB_VECTOR &&
- run->debug.arch.pc == target_rip &&
- run->debug.arch.dr6 == target_dr6,
- "SINGLE_STEP[%d]: exit %d exception %d rip 0x%llx "
- "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
- i, run->exit_reason, run->debug.arch.exception,
- run->debug.arch.pc, target_rip, run->debug.arch.dr6,
- target_dr6);
- }
-
- /* Finally test global disable */
- CLEAR_DEBUG();
- debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
- debug.arch.debugreg[7] = 0x400 | DR7_GD;
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
- target_dr6 = 0xffff0ff0 | DR6_BD;
- TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
- run->debug.arch.exception == DB_VECTOR &&
- run->debug.arch.pc == CAST_TO_RIP(bd_start) &&
- run->debug.arch.dr6 == target_dr6,
- "DR7.GD: exit %d exception %d rip 0x%llx "
- "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
- run->exit_reason, run->debug.arch.exception,
- run->debug.arch.pc, target_rip, run->debug.arch.dr6,
- target_dr6);
-
- /* Disable all debug controls, run to the end */
- CLEAR_DEBUG();
- APPLY_DEBUG();
-
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO");
- cmd = get_ucall(vm, VCPU_ID, &uc);
- TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");
-
- kvm_vm_free(vm);
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
deleted file mode 100644
index 757928199f19..000000000000
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ /dev/null
@@ -1,166 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018, Red Hat, Inc.
- *
- * Tests for Enlightened VMCS, including nested guest state.
- */
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-
-#include "kvm_util.h"
-
-#include "vmx.h"
-
-#define VCPU_ID 5
-
-void l2_guest_code(void)
-{
- GUEST_SYNC(7);
-
- GUEST_SYNC(8);
-
- /* Done, exit to L1 and never come back. */
- vmcall();
-}
-
-void l1_guest_code(struct vmx_pages *vmx_pages)
-{
-#define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-
- enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
-
- GUEST_ASSERT(vmx_pages->vmcs_gpa);
- GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
- GUEST_SYNC(3);
- GUEST_ASSERT(load_vmcs(vmx_pages));
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
-
- GUEST_SYNC(4);
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
-
- prepare_vmcs(vmx_pages, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-
- GUEST_SYNC(5);
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
- current_evmcs->revision_id = -1u;
- GUEST_ASSERT(vmlaunch());
- current_evmcs->revision_id = EVMCS_VERSION;
- GUEST_SYNC(6);
-
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
- GUEST_SYNC(9);
- GUEST_ASSERT(!vmresume());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- GUEST_SYNC(10);
-}
-
-void guest_code(struct vmx_pages *vmx_pages)
-{
- GUEST_SYNC(1);
- GUEST_SYNC(2);
-
- if (vmx_pages)
- l1_guest_code(vmx_pages);
-
- GUEST_DONE();
-
- /* Try enlightened vmptrld with an incorrect GPA */
- evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs);
- GUEST_ASSERT(vmlaunch());
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t vmx_pages_gva = 0;
-
- struct kvm_regs regs1, regs2;
- struct kvm_vm *vm;
- struct kvm_run *run;
- struct kvm_x86_state *state;
- struct ucall uc;
- int stage;
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
-
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
- if (!nested_vmx_supported() ||
- !kvm_check_cap(KVM_CAP_NESTED_STATE) ||
- !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
- print_skip("Enlightened VMCS is unsupported");
- exit(KSFT_SKIP);
- }
-
- vcpu_enable_evmcs(vm, VCPU_ID);
-
- run = vcpu_state(vm, VCPU_ID);
-
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
-
- for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Stage %d: unexpected exit reason: %u (%s),\n",
- stage, run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
- /* NOT REACHED */
- case UCALL_SYNC:
- break;
- case UCALL_DONE:
- goto part1_done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
-
- /* UCALL_SYNC is handled here. */
- TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
- uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
- stage, (ulong)uc.args[1]);
-
- state = vcpu_save_state(vm, VCPU_ID);
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
- kvm_vm_release(vm);
-
- /* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_enable_evmcs(vm, VCPU_ID);
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
- free(state);
-
- memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
- TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
- "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
- (ulong) regs2.rdi, (ulong) regs2.rsi);
- }
-
-part1_done:
- _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
- "Unexpected successful VMEnter with invalid eVMCS pointer!");
-
- kvm_vm_free(vm);
-}
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
deleted file mode 100644
index 745b708c2d3b..000000000000
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ /dev/null
@@ -1,190 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test for x86 KVM_CAP_HYPERV_CPUID
- *
- * Copyright (C) 2018, Red Hat, Inc.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- */
-
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-
-#define VCPU_ID 0
-
-static void guest_code(void)
-{
-}
-
-static bool smt_possible(void)
-{
- char buf[16];
- FILE *f;
- bool res = true;
-
- f = fopen("/sys/devices/system/cpu/smt/control", "r");
- if (f) {
- if (fread(buf, sizeof(*buf), sizeof(buf), f) > 0) {
- if (!strncmp(buf, "forceoff", 8) ||
- !strncmp(buf, "notsupported", 12))
- res = false;
- }
- fclose(f);
- }
-
- return res;
-}
-
-static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
- bool evmcs_enabled)
-{
- int i;
- int nent = 9;
- u32 test_val;
-
- if (evmcs_enabled)
- nent += 1; /* 0x4000000A */
-
- TEST_ASSERT(hv_cpuid_entries->nent == nent,
- "KVM_GET_SUPPORTED_HV_CPUID should return %d entries"
- " with evmcs=%d (returned %d)",
- nent, evmcs_enabled, hv_cpuid_entries->nent);
-
- for (i = 0; i < hv_cpuid_entries->nent; i++) {
- struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i];
-
- TEST_ASSERT((entry->function >= 0x40000000) &&
- (entry->function <= 0x40000082),
- "function %x is our of supported range",
- entry->function);
-
- TEST_ASSERT(evmcs_enabled || (entry->function != 0x4000000A),
- "0x4000000A leaf should not be reported");
-
- TEST_ASSERT(entry->index == 0,
- ".index field should be zero");
-
- TEST_ASSERT(entry->flags == 0,
- ".flags field should be zero");
-
- TEST_ASSERT(!entry->padding[0] && !entry->padding[1] &&
- !entry->padding[2], "padding should be zero");
-
- switch (entry->function) {
- case 0x40000000:
- test_val = 0x40000082;
-
- TEST_ASSERT(entry->eax == test_val,
- "Wrong max leaf report in 0x40000000.EAX: %x"
- " (evmcs=%d)",
- entry->eax, evmcs_enabled
- );
- break;
- case 0x40000004:
- test_val = entry->eax & (1UL << 18);
-
- TEST_ASSERT(!!test_val == !smt_possible(),
- "NoNonArchitecturalCoreSharing bit"
- " doesn't reflect SMT setting");
- break;
- }
-
- /*
- * If needed for debug:
- * fprintf(stdout,
- * "CPUID%lx EAX=0x%lx EBX=0x%lx ECX=0x%lx EDX=0x%lx\n",
- * entry->function, entry->eax, entry->ebx, entry->ecx,
- * entry->edx);
- */
- }
-
-}
-
-void test_hv_cpuid_e2big(struct kvm_vm *vm)
-{
- static struct kvm_cpuid2 cpuid = {.nent = 0};
- int ret;
-
- ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
-
- TEST_ASSERT(ret == -1 && errno == E2BIG,
- "KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"
- " it should have: %d %d", ret, errno);
-}
-
-
-struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
-{
- int nent = 20; /* should be enough */
- static struct kvm_cpuid2 *cpuid;
-
- cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
-
- if (!cpuid) {
- perror("malloc");
- abort();
- }
-
- cpuid->nent = nent;
-
- vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
-
- return cpuid;
-}
-
-
-int main(int argc, char *argv[])
-{
- struct kvm_vm *vm;
- int rv, stage;
- struct kvm_cpuid2 *hv_cpuid_entries;
- bool evmcs_enabled;
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- rv = kvm_check_cap(KVM_CAP_HYPERV_CPUID);
- if (!rv) {
- print_skip("KVM_CAP_HYPERV_CPUID not supported");
- exit(KSFT_SKIP);
- }
-
- for (stage = 0; stage < 3; stage++) {
- evmcs_enabled = false;
-
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- switch (stage) {
- case 0:
- test_hv_cpuid_e2big(vm);
- continue;
- case 1:
- break;
- case 2:
- if (!nested_vmx_supported() ||
- !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
- print_skip("Enlightened VMCS is unsupported");
- continue;
- }
- vcpu_enable_evmcs(vm, VCPU_ID);
- evmcs_enabled = true;
- break;
- }
-
- hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
- test_hv_cpuid(hv_cpuid_entries, evmcs_enabled);
- free(hv_cpuid_entries);
- kvm_vm_free(vm);
- }
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
deleted file mode 100644
index e6480fd5c4bd..000000000000
--- a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * mmio_warning_test
- *
- * Copyright (C) 2019, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- * Test that we don't get a kernel warning when we call KVM_RUN after a
- * triple fault occurs. To get the triple fault to occur we call KVM_RUN
- * on a VCPU that hasn't been properly setup.
- *
- */
-
-#define _GNU_SOURCE
-#include <fcntl.h>
-#include <kvm_util.h>
-#include <linux/kvm.h>
-#include <processor.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <test_util.h>
-#include <unistd.h>
-
-#define NTHREAD 4
-#define NPROCESS 5
-
-struct thread_context {
- int kvmcpu;
- struct kvm_run *run;
-};
-
-void *thr(void *arg)
-{
- struct thread_context *tc = (struct thread_context *)arg;
- int res;
- int kvmcpu = tc->kvmcpu;
- struct kvm_run *run = tc->run;
-
- res = ioctl(kvmcpu, KVM_RUN, 0);
- pr_info("ret1=%d exit_reason=%d suberror=%d\n",
- res, run->exit_reason, run->internal.suberror);
-
- return 0;
-}
-
-void test(void)
-{
- int i, kvm, kvmvm, kvmcpu;
- pthread_t th[NTHREAD];
- struct kvm_run *run;
- struct thread_context tc;
-
- kvm = open("/dev/kvm", O_RDWR);
- TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
- kvmvm = ioctl(kvm, KVM_CREATE_VM, 0);
- TEST_ASSERT(kvmvm != -1, "KVM_CREATE_VM failed");
- kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
- TEST_ASSERT(kvmcpu != -1, "KVM_CREATE_VCPU failed");
- run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
- kvmcpu, 0);
- tc.kvmcpu = kvmcpu;
- tc.run = run;
- srand(getpid());
- for (i = 0; i < NTHREAD; i++) {
- pthread_create(&th[i], NULL, thr, (void *)(uintptr_t)&tc);
- usleep(rand() % 10000);
- }
- for (i = 0; i < NTHREAD; i++)
- pthread_join(th[i], NULL);
-}
-
-int get_warnings_count(void)
-{
- int warnings;
- FILE *f;
-
- f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
- fscanf(f, "%d", &warnings);
- fclose(f);
-
- return warnings;
-}
-
-int main(void)
-{
- int warnings_before, warnings_after;
-
- if (!is_intel_cpu()) {
- print_skip("Must be run on an Intel CPU");
- exit(KSFT_SKIP);
- }
-
- if (vm_is_unrestricted_guest(NULL)) {
- print_skip("Unrestricted guest must be disabled");
- exit(KSFT_SKIP);
- }
-
- warnings_before = get_warnings_count();
-
- for (int i = 0; i < NPROCESS; ++i) {
- int status;
- int pid = fork();
-
- if (pid < 0)
- exit(1);
- if (pid == 0) {
- test();
- exit(0);
- }
- while (waitpid(pid, &status, __WALL) != pid)
- ;
- }
-
- warnings_after = get_warnings_count();
- TEST_ASSERT(warnings_before == warnings_after,
- "Warnings found in kernel. Run 'dmesg' to inspect them.");
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
deleted file mode 100644
index 1e89688cbbbf..000000000000
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Test for x86 KVM_CAP_MSR_PLATFORM_INFO
- *
- * Copyright (C) 2018, Google LLC.
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
- *
- * Verifies expected behavior of controlling guest access to
- * MSR_PLATFORM_INFO.
- */
-
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-
-#define VCPU_ID 0
-#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
-
-static void guest_code(void)
-{
- uint64_t msr_platform_info;
-
- for (;;) {
- msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
- GUEST_SYNC(msr_platform_info);
- asm volatile ("inc %r11");
- }
-}
-
-static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
-{
- struct kvm_enable_cap cap = {};
-
- cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
- cap.flags = 0;
- cap.args[0] = (int)enable;
- vm_enable_cap(vm, &cap);
-}
-
-static void test_msr_platform_info_enabled(struct kvm_vm *vm)
-{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct ucall uc;
-
- set_msr_platform_info_enabled(vm, true);
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
- get_ucall(vm, VCPU_ID, &uc);
- TEST_ASSERT(uc.cmd == UCALL_SYNC,
- "Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
- TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
- MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
- "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
- MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
-}
-
-static void test_msr_platform_info_disabled(struct kvm_vm *vm)
-{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
-
- set_msr_platform_info_enabled(vm, false);
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
- "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_vm *vm;
- int rv;
- uint64_t msr_platform_info;
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
- if (!rv) {
- print_skip("KVM_CAP_MSR_PLATFORM_INFO not supported");
- exit(KSFT_SKIP);
- }
-
- vm = vm_create_default(VCPU_ID, 0, guest_code);
-
- msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
- vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
- msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
- test_msr_platform_info_enabled(vm);
- test_msr_platform_info_disabled(vm);
- vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
-
- kvm_vm_free(vm);
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
deleted file mode 100644
index 9f7656184f31..000000000000
--- a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * KVM_SET_SREGS tests
- *
- * Copyright (C) 2018, Google LLC.
- *
- * This is a regression test for the bug fixed by the following commit:
- * d3802286fa0f ("kvm: x86: Disallow illegal IA32_APIC_BASE MSR values")
- *
- * That bug allowed a user-mode program that called the KVM_SET_SREGS
- * ioctl to put a VCPU's local APIC into an invalid state.
- */
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-
-#include "kvm_util.h"
-#include "processor.h"
-
-#define VCPU_ID 5
-
-int main(int argc, char *argv[])
-{
- struct kvm_sregs sregs;
- struct kvm_vm *vm;
- int rc;
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, NULL);
-
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
- sregs.apic_base = 1 << 10;
- rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
- TEST_ASSERT(rc, "Set IA32_APIC_BASE to %llx (invalid)",
- sregs.apic_base);
- sregs.apic_base = 1 << 11;
- rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
- TEST_ASSERT(!rc, "Couldn't set IA32_APIC_BASE to %llx (valid)",
- sregs.apic_base);
-
- kvm_vm_free(vm);
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
deleted file mode 100644
index ae39a220609f..000000000000
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ /dev/null
@@ -1,164 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018, Red Hat, Inc.
- *
- * Tests for SMM.
- */
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-
-#include "kvm_util.h"
-
-#include "vmx.h"
-#include "svm_util.h"
-
-#define VCPU_ID 1
-
-#define PAGE_SIZE 4096
-
-#define SMRAM_SIZE 65536
-#define SMRAM_MEMSLOT ((1 << 16) | 1)
-#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
-#define SMRAM_GPA 0x1000000
-#define SMRAM_STAGE 0xfe
-
-#define STR(x) #x
-#define XSTR(s) STR(s)
-
-#define SYNC_PORT 0xe
-#define DONE 0xff
-
-/*
- * This is compiled as normal 64-bit code, however, SMI handler is executed
- * in real-address mode. To stay simple we're limiting ourselves to a mode
- * independent subset of asm here.
- * SMI handler always report back fixed stage SMRAM_STAGE.
- */
-uint8_t smi_handler[] = {
- 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */
- 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */
- 0x0f, 0xaa, /* rsm */
-};
-
-static inline void sync_with_host(uint64_t phase)
-{
- asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
- : "+a" (phase));
-}
-
-void self_smi(void)
-{
- wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
- APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
-}
-
-void guest_code(void *arg)
-{
- uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
-
- sync_with_host(1);
-
- wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
-
- sync_with_host(2);
-
- self_smi();
-
- sync_with_host(4);
-
- if (arg) {
- if (cpu_has_svm())
- generic_svm_setup(arg, NULL, NULL);
- else
- GUEST_ASSERT(prepare_for_vmx_operation(arg));
-
- sync_with_host(5);
-
- self_smi();
-
- sync_with_host(7);
- }
-
- sync_with_host(DONE);
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t nested_gva = 0;
-
- struct kvm_regs regs;
- struct kvm_vm *vm;
- struct kvm_run *run;
- struct kvm_x86_state *state;
- int stage, stage_reported;
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
-
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
- run = vcpu_state(vm, VCPU_ID);
-
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
- SMRAM_MEMSLOT, SMRAM_PAGES, 0);
- TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
- == SMRAM_GPA, "could not allocate guest physical addresses?");
-
- memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
- memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
- sizeof(smi_handler));
-
- vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
-
- if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- if (nested_svm_supported())
- vcpu_alloc_svm(vm, &nested_gva);
- else if (nested_vmx_supported())
- vcpu_alloc_vmx(vm, &nested_gva);
- }
-
- if (!nested_gva)
- pr_info("will skip SMM test with VMX enabled\n");
-
- vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-
- for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Stage %d: unexpected exit reason: %u (%s),\n",
- stage, run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- memset(&regs, 0, sizeof(regs));
- vcpu_regs_get(vm, VCPU_ID, &regs);
-
- stage_reported = regs.rax & 0xff;
-
- if (stage_reported == DONE)
- goto done;
-
- TEST_ASSERT(stage_reported == stage ||
- stage_reported == SMRAM_STAGE,
- "Unexpected stage: #%x, got %x",
- stage, stage_reported);
-
- state = vcpu_save_state(vm, VCPU_ID);
- kvm_vm_release(vm);
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
- free(state);
- }
-
-done:
- kvm_vm_free(vm);
-}
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
deleted file mode 100644
index f6c8b9042f8a..000000000000
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * KVM_GET/SET_* tests
- *
- * Copyright (C) 2018, Red Hat, Inc.
- *
- * Tests for vCPU state save/restore, including nested guest state.
- */
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-#include "svm_util.h"
-
-#define VCPU_ID 5
-#define L2_GUEST_STACK_SIZE 256
-
-void svm_l2_guest_code(void)
-{
- GUEST_SYNC(4);
- /* Exit to L1 */
- vmcall();
- GUEST_SYNC(6);
- /* Done, exit to L1 and never come back. */
- vmcall();
-}
-
-static void svm_l1_guest_code(struct svm_test_data *svm)
-{
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- struct vmcb *vmcb = svm->vmcb;
-
- GUEST_ASSERT(svm->vmcb_gpa);
- /* Prepare for L2 execution. */
- generic_svm_setup(svm, svm_l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-
- GUEST_SYNC(3);
- run_guest(vmcb, svm->vmcb_gpa);
- GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
- GUEST_SYNC(5);
- vmcb->save.rip += 3;
- run_guest(vmcb, svm->vmcb_gpa);
- GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
- GUEST_SYNC(7);
-}
-
-void vmx_l2_guest_code(void)
-{
- GUEST_SYNC(6);
-
- /* Exit to L1 */
- vmcall();
-
- /* L1 has now set up a shadow VMCS for us. */
- GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
- GUEST_SYNC(10);
- GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
- GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
- GUEST_SYNC(11);
- GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
- GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
- GUEST_SYNC(12);
-
- /* Done, exit to L1 and never come back. */
- vmcall();
-}
-
-static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
-{
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-
- GUEST_ASSERT(vmx_pages->vmcs_gpa);
- GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
- GUEST_SYNC(3);
- GUEST_ASSERT(load_vmcs(vmx_pages));
- GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
-
- GUEST_SYNC(4);
- GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
-
- prepare_vmcs(vmx_pages, vmx_l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-
- GUEST_SYNC(5);
- GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-
- /* Check that the launched state is preserved. */
- GUEST_ASSERT(vmlaunch());
-
- GUEST_ASSERT(!vmresume());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-
- GUEST_SYNC(7);
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-
- GUEST_ASSERT(!vmresume());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-
- vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);
-
- vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
- vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);
-
- GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
- GUEST_ASSERT(vmlaunch());
- GUEST_SYNC(8);
- GUEST_ASSERT(vmlaunch());
- GUEST_ASSERT(vmresume());
-
- vmwrite(GUEST_RIP, 0xc0ffee);
- GUEST_SYNC(9);
- GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
-
- GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
- GUEST_ASSERT(!vmresume());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-
- GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
- GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
- GUEST_ASSERT(vmlaunch());
- GUEST_ASSERT(vmresume());
- GUEST_SYNC(13);
- GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
- GUEST_ASSERT(vmlaunch());
- GUEST_ASSERT(vmresume());
-}
-
-static void __attribute__((__flatten__)) guest_code(void *arg)
-{
- GUEST_SYNC(1);
- GUEST_SYNC(2);
-
- if (arg) {
- if (cpu_has_svm())
- svm_l1_guest_code(arg);
- else
- vmx_l1_guest_code(arg);
- }
-
- GUEST_DONE();
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t nested_gva = 0;
-
- struct kvm_regs regs1, regs2;
- struct kvm_vm *vm;
- struct kvm_run *run;
- struct kvm_x86_state *state;
- struct ucall uc;
- int stage;
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- run = vcpu_state(vm, VCPU_ID);
-
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
- if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- if (nested_svm_supported())
- vcpu_alloc_svm(vm, &nested_gva);
- else if (nested_vmx_supported())
- vcpu_alloc_vmx(vm, &nested_gva);
- }
-
- if (!nested_gva)
- pr_info("will skip nested state checks\n");
-
- vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
-
- for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Stage %d: unexpected exit reason: %u (%s),\n",
- stage, run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
- /* NOT REACHED */
- case UCALL_SYNC:
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
-
- /* UCALL_SYNC is handled here. */
- TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
- uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
- stage, (ulong)uc.args[1]);
-
- state = vcpu_save_state(vm, VCPU_ID);
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
- kvm_vm_release(vm);
-
- /* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
- free(state);
-
- memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
- TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
- "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
- (ulong) regs2.rdi, (ulong) regs2.rsi);
- }
-
-done:
- kvm_vm_free(vm);
-}
diff --git a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
deleted file mode 100644
index 0e1adb4e3199..000000000000
--- a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * svm_vmcall_test
- *
- * Copyright (C) 2020, Red Hat, Inc.
- *
- * Nested SVM testing: VMCALL
- */
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "svm_util.h"
-
-#define VCPU_ID 5
-
-static struct kvm_vm *vm;
-
-static void l2_guest_code(struct svm_test_data *svm)
-{
- __asm__ __volatile__("vmcall");
-}
-
-static void l1_guest_code(struct svm_test_data *svm)
-{
- #define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- struct vmcb *vmcb = svm->vmcb;
-
- /* Prepare for L2 execution. */
- generic_svm_setup(svm, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-
- run_guest(vmcb, svm->vmcb_gpa);
-
- GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
- GUEST_DONE();
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t svm_gva;
-
- nested_svm_check_supported();
-
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
- vcpu_alloc_svm(vm, &svm_gva);
- vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
-
- for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct ucall uc;
-
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
- /* NOT REACHED */
- case UCALL_SYNC:
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
- }
- }
-done:
- kvm_vm_free(vm);
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
deleted file mode 100644
index d672f0a473f8..000000000000
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ /dev/null
@@ -1,243 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Test for x86 KVM_CAP_SYNC_REGS
- *
- * Copyright (C) 2018, Google LLC.
- *
- * Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
- * including requesting an invalid register set, updates to/from values
- * in kvm_run.s.regs when kvm_valid_regs and kvm_dirty_regs are toggled.
- */
-
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-
-#define VCPU_ID 5
-
-#define UCALL_PIO_PORT ((uint16_t)0x1000)
-
-/*
- * ucall is embedded here to protect against compiler reshuffling registers
- * before calling a function. In this test we only need to get KVM_EXIT_IO
- * vmexit and preserve RBX, no additional information is needed.
- */
-void guest_code(void)
-{
- asm volatile("1: in %[port], %%al\n"
- "add $0x1, %%rbx\n"
- "jmp 1b"
- : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
-}
-
-static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
-{
-#define REG_COMPARE(reg) \
- TEST_ASSERT(left->reg == right->reg, \
- "Register " #reg \
- " values did not match: 0x%llx, 0x%llx\n", \
- left->reg, right->reg)
- REG_COMPARE(rax);
- REG_COMPARE(rbx);
- REG_COMPARE(rcx);
- REG_COMPARE(rdx);
- REG_COMPARE(rsi);
- REG_COMPARE(rdi);
- REG_COMPARE(rsp);
- REG_COMPARE(rbp);
- REG_COMPARE(r8);
- REG_COMPARE(r9);
- REG_COMPARE(r10);
- REG_COMPARE(r11);
- REG_COMPARE(r12);
- REG_COMPARE(r13);
- REG_COMPARE(r14);
- REG_COMPARE(r15);
- REG_COMPARE(rip);
- REG_COMPARE(rflags);
-#undef REG_COMPARE
-}
-
-static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
-{
-}
-
-static void compare_vcpu_events(struct kvm_vcpu_events *left,
- struct kvm_vcpu_events *right)
-{
-}
-
-#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
-#define INVALID_SYNC_FIELD 0x80000000
-
-int main(int argc, char *argv[])
-{
- struct kvm_vm *vm;
- struct kvm_run *run;
- struct kvm_regs regs;
- struct kvm_sregs sregs;
- struct kvm_vcpu_events events;
- int rv, cap;
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
- if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
- print_skip("KVM_CAP_SYNC_REGS not supported");
- exit(KSFT_SKIP);
- }
- if ((cap & INVALID_SYNC_FIELD) != 0) {
- print_skip("The \"invalid\" field is not invalid");
- exit(KSFT_SKIP);
- }
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
-
- run = vcpu_state(vm, VCPU_ID);
-
- /* Request reading invalid register set from VCPU. */
- run->kvm_valid_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
- rv);
- vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
-
- run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
- rv);
- vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
-
- /* Request setting invalid register set into VCPU. */
- run->kvm_dirty_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
- rv);
- vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
-
- run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(rv < 0 && errno == EINVAL,
- "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
- rv);
- vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
-
- /* Request and verify all valid register sets. */
- /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
- run->kvm_valid_regs = TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- vcpu_regs_get(vm, VCPU_ID, &regs);
- compare_regs(&regs, &run->s.regs.regs);
-
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
- compare_sregs(&sregs, &run->s.regs.sregs);
-
- vcpu_events_get(vm, VCPU_ID, &events);
- compare_vcpu_events(&events, &run->s.regs.events);
-
- /* Set and verify various register values. */
- run->s.regs.regs.rbx = 0xBAD1DEA;
- run->s.regs.sregs.apic_base = 1 << 11;
- /* TODO run->s.regs.events.XYZ = ABC; */
-
- run->kvm_valid_regs = TEST_SYNC_FIELDS;
- run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
- "rbx sync regs value incorrect 0x%llx.",
- run->s.regs.regs.rbx);
- TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
- "apic_base sync regs value incorrect 0x%llx.",
- run->s.regs.sregs.apic_base);
-
- vcpu_regs_get(vm, VCPU_ID, &regs);
- compare_regs(&regs, &run->s.regs.regs);
-
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
- compare_sregs(&sregs, &run->s.regs.sregs);
-
- vcpu_events_get(vm, VCPU_ID, &events);
- compare_vcpu_events(&events, &run->s.regs.events);
-
- /* Clear kvm_dirty_regs bits, verify new s.regs values are
- * overwritten with existing guest values.
- */
- run->kvm_valid_regs = TEST_SYNC_FIELDS;
- run->kvm_dirty_regs = 0;
- run->s.regs.regs.rbx = 0xDEADBEEF;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
- "rbx sync regs value incorrect 0x%llx.",
- run->s.regs.regs.rbx);
-
- /* Clear kvm_valid_regs bits and kvm_dirty_bits.
- * Verify s.regs values are not overwritten with existing guest values
- * and that guest values are not overwritten with kvm_sync_regs values.
- */
- run->kvm_valid_regs = 0;
- run->kvm_dirty_regs = 0;
- run->s.regs.regs.rbx = 0xAAAA;
- regs.rbx = 0xBAC0;
- vcpu_regs_set(vm, VCPU_ID, &regs);
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
- "rbx sync regs value incorrect 0x%llx.",
- run->s.regs.regs.rbx);
- vcpu_regs_get(vm, VCPU_ID, &regs);
- TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
- "rbx guest value incorrect 0x%llx.",
- regs.rbx);
-
- /* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
- * with existing guest values but that guest values are overwritten
- * with kvm_sync_regs values.
- */
- run->kvm_valid_regs = 0;
- run->kvm_dirty_regs = TEST_SYNC_FIELDS;
- run->s.regs.regs.rbx = 0xBBBB;
- rv = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
- TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
- "rbx sync regs value incorrect 0x%llx.",
- run->s.regs.regs.rbx);
- vcpu_regs_get(vm, VCPU_ID, &regs);
- TEST_ASSERT(regs.rbx == 0xBBBB + 1,
- "rbx guest value incorrect 0x%llx.",
- regs.rbx);
-
- kvm_vm_free(vm);
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
deleted file mode 100644
index fe40ade06a49..000000000000
--- a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * vmx_close_while_nested
- *
- * Copyright (C) 2019, Red Hat, Inc.
- *
- * Verify that nothing bad happens if a KVM user exits with open
- * file descriptors while executing a nested guest.
- */
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "kselftest.h"
-
-#define VCPU_ID 5
-
-enum {
- PORT_L0_EXIT = 0x2000,
-};
-
-/* The virtual machine object. */
-static struct kvm_vm *vm;
-
-static void l2_guest_code(void)
-{
- /* Exit to L0 */
- asm volatile("inb %%dx, %%al"
- : : [port] "d" (PORT_L0_EXIT) : "rax");
-}
-
-static void l1_guest_code(struct vmx_pages *vmx_pages)
-{
-#define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-
- GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
- GUEST_ASSERT(load_vmcs(vmx_pages));
-
- /* Prepare the VMCS for L2 execution. */
- prepare_vmcs(vmx_pages, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(0);
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t vmx_pages_gva;
-
- nested_vmx_check_supported();
-
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
- /* Allocate VMX pages and shared descriptors (vmx_pages). */
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
-
- for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct ucall uc;
-
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- if (run->io.port == PORT_L0_EXIT)
- break;
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
- /* NOT REACHED */
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
- }
-}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
deleted file mode 100644
index e894a638a155..000000000000
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ /dev/null
@@ -1,157 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * KVM dirty page logging test
- *
- * Copyright (C) 2018, Red Hat, Inc.
- */
-
-#define _GNU_SOURCE /* for program_invocation_name */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-
-#define VCPU_ID 1
-
-/* The memory slot index to track dirty pages */
-#define TEST_MEM_SLOT_INDEX 1
-#define TEST_MEM_PAGES 3
-
-/* L1 guest test virtual memory offset */
-#define GUEST_TEST_MEM 0xc0000000
-
-/* L2 guest test virtual memory offset */
-#define NESTED_TEST_MEM1 0xc0001000
-#define NESTED_TEST_MEM2 0xc0002000
-
-static void l2_guest_code(void)
-{
- *(volatile uint64_t *)NESTED_TEST_MEM1;
- *(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
- GUEST_SYNC(true);
- GUEST_SYNC(false);
-
- *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
- GUEST_SYNC(true);
- *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
- GUEST_SYNC(true);
- GUEST_SYNC(false);
-
- /* Exit to L1 and never come back. */
- vmcall();
-}
-
-void l1_guest_code(struct vmx_pages *vmx)
-{
-#define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
-
- GUEST_ASSERT(vmx->vmcs_gpa);
- GUEST_ASSERT(prepare_for_vmx_operation(vmx));
- GUEST_ASSERT(load_vmcs(vmx));
-
- prepare_vmcs(vmx, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-
- GUEST_SYNC(false);
- GUEST_ASSERT(!vmlaunch());
- GUEST_SYNC(false);
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- GUEST_DONE();
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t vmx_pages_gva = 0;
- struct vmx_pages *vmx;
- unsigned long *bmap;
- uint64_t *host_test_mem;
-
- struct kvm_vm *vm;
- struct kvm_run *run;
- struct ucall uc;
- bool done = false;
-
- nested_vmx_check_supported();
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
- run = vcpu_state(vm, VCPU_ID);
-
- /* Add an extra memory slot for testing dirty logging */
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- GUEST_TEST_MEM,
- TEST_MEM_SLOT_INDEX,
- TEST_MEM_PAGES,
- KVM_MEM_LOG_DIRTY_PAGES);
-
- /*
- * Add an identity map for GVA range [0xc0000000, 0xc0002000). This
- * affects both L1 and L2. However...
- */
- virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES, 0);
-
- /*
- * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
- * 0xc0000000.
- *
- * Note that prepare_eptp should be called only L1's GPA map is done,
- * meaning after the last call to virt_map.
- */
- prepare_eptp(vmx, vm, 0);
- nested_map_memslot(vmx, vm, 0, 0);
- nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
- nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
-
- bmap = bitmap_alloc(TEST_MEM_PAGES);
- host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
-
- while (!done) {
- memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
- _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
- /* NOT REACHED */
- case UCALL_SYNC:
- /*
- * The nested guest wrote at offset 0x1000 in the memslot, but the
- * dirty bitmap must be filled in according to L1 GPA, not L2.
- */
- kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
- if (uc.args[1]) {
- TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
- TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
- } else {
- TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
- TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
- }
-
- TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
- TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
- TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
- TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
- break;
- case UCALL_DONE:
- done = true;
- break;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
- }
-}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
deleted file mode 100644
index a7737af1224f..000000000000
--- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
+++ /dev/null
@@ -1,259 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * VMX-preemption timer test
- *
- * Copyright (C) 2020, Google, LLC.
- *
- * Test to ensure the VM-Enter after migration doesn't
- * incorrectly restarts the timer with the full timer
- * value instead of partially decayed timer value
- *
- */
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-
-#define VCPU_ID 5
-#define PREEMPTION_TIMER_VALUE 100000000ull
-#define PREEMPTION_TIMER_VALUE_THRESHOLD1 80000000ull
-
-u32 vmx_pt_rate;
-bool l2_save_restore_done;
-static u64 l2_vmx_pt_start;
-volatile u64 l2_vmx_pt_finish;
-
-union vmx_basic basic;
-union vmx_ctrl_msr ctrl_pin_rev;
-union vmx_ctrl_msr ctrl_exit_rev;
-
-void l2_guest_code(void)
-{
- u64 vmx_pt_delta;
-
- vmcall();
- l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
-
- /*
- * Wait until the 1st threshold has passed
- */
- do {
- l2_vmx_pt_finish = rdtsc();
- vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
- vmx_pt_rate;
- } while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);
-
- /*
- * Force L2 through Save and Restore cycle
- */
- GUEST_SYNC(1);
-
- l2_save_restore_done = 1;
-
- /*
- * Now wait for the preemption timer to fire and
- * exit to L1
- */
- while ((l2_vmx_pt_finish = rdtsc()))
- ;
-}
-
-void l1_guest_code(struct vmx_pages *vmx_pages)
-{
-#define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- u64 l1_vmx_pt_start;
- u64 l1_vmx_pt_finish;
- u64 l1_tsc_deadline, l2_tsc_deadline;
-
- GUEST_ASSERT(vmx_pages->vmcs_gpa);
- GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
- GUEST_ASSERT(load_vmcs(vmx_pages));
- GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
-
- prepare_vmcs(vmx_pages, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
-
- /*
- * Check for Preemption timer support
- */
- basic.val = rdmsr(MSR_IA32_VMX_BASIC);
- ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
- : MSR_IA32_VMX_PINBASED_CTLS);
- ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
- : MSR_IA32_VMX_EXIT_CTLS);
-
- if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
- !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
- return;
-
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));
-
- /*
- * Turn on PIN control and resume the guest
- */
- GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
- vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
- PIN_BASED_VMX_PREEMPTION_TIMER));
-
- GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
- PREEMPTION_TIMER_VALUE));
-
- vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
-
- l2_save_restore_done = 0;
-
- l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
-
- GUEST_ASSERT(!vmresume());
-
- l1_vmx_pt_finish = rdtsc();
-
- /*
- * Ensure the exit from L2 happens after L2 has gone
- * through save and restore
- */
- GUEST_ASSERT(l2_save_restore_done);
-
- /*
- * Ensure the exit from L2 is due to preemption timer expiry
- */
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);
-
- l1_tsc_deadline = l1_vmx_pt_start +
- (PREEMPTION_TIMER_VALUE << vmx_pt_rate);
-
- l2_tsc_deadline = l2_vmx_pt_start +
- (PREEMPTION_TIMER_VALUE << vmx_pt_rate);
-
- /*
- * Sync with the host and pass the l1|l2 pt_expiry_finish times and
- * tsc deadlines so that the host can verify they are as expected
- */
- GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
- l2_vmx_pt_finish, l2_tsc_deadline);
-}
-
-void guest_code(struct vmx_pages *vmx_pages)
-{
- if (vmx_pages)
- l1_guest_code(vmx_pages);
-
- GUEST_DONE();
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t vmx_pages_gva = 0;
-
- struct kvm_regs regs1, regs2;
- struct kvm_vm *vm;
- struct kvm_run *run;
- struct kvm_x86_state *state;
- struct ucall uc;
- int stage;
-
- /*
- * AMD currently does not implement any VMX features, so for now we
- * just early out.
- */
- nested_vmx_check_supported();
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- run = vcpu_state(vm, VCPU_ID);
-
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
- if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
- } else {
- pr_info("will skip vmx preemption timer checks\n");
- goto done;
- }
-
- for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Stage %d: unexpected exit reason: %u (%s),\n",
- stage, run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
- /* NOT REACHED */
- case UCALL_SYNC:
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
-
- /* UCALL_SYNC is handled here. */
- TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
- uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
- stage, (ulong)uc.args[1]);
- /*
- * If this is stage 2, verify that the VMX preemption timer
- * expiry is as expected:
- * from L1's perspective, the preemption timer must not have
- * expired too early;
- * from L2's perspective, it must not have
- * expired too late.
- */
- if (stage == 2) {
-
- pr_info("Stage %d: L1 PT expiry TSC (%lu) , L1 TSC deadline (%lu)\n",
- stage, uc.args[2], uc.args[3]);
-
- pr_info("Stage %d: L2 PT expiry TSC (%lu) , L2 TSC deadline (%lu)\n",
- stage, uc.args[4], uc.args[5]);
-
- TEST_ASSERT(uc.args[2] >= uc.args[3],
- "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
- stage, uc.args[2], uc.args[3]);
-
- TEST_ASSERT(uc.args[4] < uc.args[5],
- "Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
- stage, uc.args[4], uc.args[5]);
- }
-
- state = vcpu_save_state(vm, VCPU_ID);
- memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
-
- kvm_vm_release(vm);
-
- /* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
- free(state);
-
- memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
- TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
- "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
- (ulong) regs2.rdi, (ulong) regs2.rsi);
- }
-
-done:
- kvm_vm_free(vm);
-}
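
The deleted preemption-timer test leans on one piece of arithmetic: the timer counts down once every 2^N TSC cycles, where N is bits 4:0 of MSR_IA32_VMX_MISC, so a TSC deadline is the starting TSC aligned down to that granularity plus the programmed timer value shifted left by N. Below is a minimal standalone sketch of that calculation, assuming nothing beyond <stdint.h>; the helper name is illustrative and not part of the selftest.

#include <stdint.h>

/*
 * Sketch of the deadline math checked above: the VMX preemption timer
 * ticks once every 2^rate_shift TSC cycles, where rate_shift is
 * MSR_IA32_VMX_MISC[4:0].
 */
static inline uint64_t vmx_pt_tsc_deadline(uint64_t start_tsc,
					    uint64_t timer_value,
					    unsigned int rate_shift)
{
	/* Align the starting TSC down to the timer's granularity... */
	uint64_t aligned_start = (start_tsc >> rate_shift) << rate_shift;

	/* ...then the timer expires after timer_value ticks. */
	return aligned_start + (timer_value << rate_shift);
}
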
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
deleted file mode 100644
index 54cdefdfb49d..000000000000
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * vmx_set_nested_state_test
- *
- * Copyright (C) 2019, Google LLC.
- *
- * This test verifies the integrity of calling the ioctl KVM_SET_NESTED_STATE.
- */
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-
-#include <errno.h>
-#include <linux/kvm.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-/*
- * Mirror of VMCS12_REVISION in arch/x86/kvm/vmx/vmcs12.h. If that value
- * changes, this should be updated.
- */
-#define VMCS12_REVISION 0x11e57ed0
-#define VCPU_ID 5
-
-bool have_evmcs;
-
-void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
-{
- vcpu_nested_state_set(vm, VCPU_ID, state, false);
-}
-
-void test_nested_state_expect_errno(struct kvm_vm *vm,
- struct kvm_nested_state *state,
- int expected_errno)
-{
- int rv;
-
- rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
- TEST_ASSERT(rv == -1 && errno == expected_errno,
- "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
- strerror(expected_errno), expected_errno, rv, strerror(errno),
- errno);
-}
-
-void test_nested_state_expect_einval(struct kvm_vm *vm,
- struct kvm_nested_state *state)
-{
- test_nested_state_expect_errno(vm, state, EINVAL);
-}
-
-void test_nested_state_expect_efault(struct kvm_vm *vm,
- struct kvm_nested_state *state)
-{
- test_nested_state_expect_errno(vm, state, EFAULT);
-}
-
-void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
- u32 vmcs12_revision)
-{
- /* Set revision_id in vmcs12 to vmcs12_revision. */
- memcpy(&state->data, &vmcs12_revision, sizeof(u32));
-}
-
-void set_default_state(struct kvm_nested_state *state)
-{
- memset(state, 0, sizeof(*state));
- state->flags = KVM_STATE_NESTED_RUN_PENDING |
- KVM_STATE_NESTED_GUEST_MODE;
- state->format = 0;
- state->size = sizeof(*state);
-}
-
-void set_default_vmx_state(struct kvm_nested_state *state, int size)
-{
- memset(state, 0, size);
- state->flags = KVM_STATE_NESTED_GUEST_MODE |
- KVM_STATE_NESTED_RUN_PENDING;
- if (have_evmcs)
- state->flags |= KVM_STATE_NESTED_EVMCS;
- state->format = 0;
- state->size = size;
- state->hdr.vmx.vmxon_pa = 0x1000;
- state->hdr.vmx.vmcs12_pa = 0x2000;
- state->hdr.vmx.smm.flags = 0;
- set_revision_id_for_vmcs12(state, VMCS12_REVISION);
-}
-
-void test_vmx_nested_state(struct kvm_vm *vm)
-{
- /* Add a page for VMCS12. */
- const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
- struct kvm_nested_state *state =
- (struct kvm_nested_state *)malloc(state_sz);
-
- /* The format must be set to 0. 0 for VMX, 1 for SVM. */
- set_default_vmx_state(state, state_sz);
- state->format = 1;
- test_nested_state_expect_einval(vm, state);
-
- /*
- * We cannot virtualize anything if the guest does not have VMX
- * enabled.
- */
- set_default_vmx_state(state, state_sz);
- test_nested_state_expect_einval(vm, state);
-
- /*
- * We cannot virtualize anything if the guest does not have VMX
- * enabled. We expect KVM_SET_NESTED_STATE to return 0 if vmxon_pa
- * is set to -1ull, but the flags must be zero.
- */
- set_default_vmx_state(state, state_sz);
- state->hdr.vmx.vmxon_pa = -1ull;
- test_nested_state_expect_einval(vm, state);
-
- state->hdr.vmx.vmcs12_pa = -1ull;
- state->flags = KVM_STATE_NESTED_EVMCS;
- test_nested_state_expect_einval(vm, state);
-
- state->flags = 0;
- test_nested_state(vm, state);
-
- /* Enable VMX in the guest CPUID. */
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
- /*
- * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
- * setting the nested state, but any flags other than eVMCS must be clear.
- * The eVMCS flag can be set if the enlightened VMCS capability has
- * been enabled.
- */
- set_default_vmx_state(state, state_sz);
- state->hdr.vmx.vmxon_pa = -1ull;
- state->hdr.vmx.vmcs12_pa = -1ull;
- test_nested_state_expect_einval(vm, state);
-
- state->flags &= KVM_STATE_NESTED_EVMCS;
- if (have_evmcs) {
- test_nested_state_expect_einval(vm, state);
- vcpu_enable_evmcs(vm, VCPU_ID);
- }
- test_nested_state(vm, state);
-
- /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
- state->hdr.vmx.smm.flags = 1;
- test_nested_state_expect_einval(vm, state);
-
- /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
- set_default_vmx_state(state, state_sz);
- state->hdr.vmx.vmxon_pa = -1ull;
- state->flags = 0;
- test_nested_state_expect_einval(vm, state);
-
- /* It is invalid to have vmxon_pa set to a non-page aligned address. */
- set_default_vmx_state(state, state_sz);
- state->hdr.vmx.vmxon_pa = 1;
- test_nested_state_expect_einval(vm, state);
-
- /*
- * It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
- * KVM_STATE_NESTED_GUEST_MODE set together.
- */
- set_default_vmx_state(state, state_sz);
- state->flags = KVM_STATE_NESTED_GUEST_MODE |
- KVM_STATE_NESTED_RUN_PENDING;
- state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
- test_nested_state_expect_einval(vm, state);
-
- /*
- * It is invalid to have any of the SMM flags set besides:
- * KVM_STATE_NESTED_SMM_GUEST_MODE
- * KVM_STATE_NESTED_SMM_VMXON
- */
- set_default_vmx_state(state, state_sz);
- state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
- KVM_STATE_NESTED_SMM_VMXON);
- test_nested_state_expect_einval(vm, state);
-
- /* Outside SMM, SMM flags must be zero. */
- set_default_vmx_state(state, state_sz);
- state->flags = 0;
- state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
- test_nested_state_expect_einval(vm, state);
-
- /* Size must be large enough to fit kvm_nested_state and vmcs12. */
- set_default_vmx_state(state, state_sz);
- state->size = sizeof(*state);
- test_nested_state(vm, state);
-
- /* vmxon_pa cannot be the same address as vmcs_pa. */
- set_default_vmx_state(state, state_sz);
- state->hdr.vmx.vmxon_pa = 0;
- state->hdr.vmx.vmcs12_pa = 0;
- test_nested_state_expect_einval(vm, state);
-
- /* The revision id for vmcs12 must be VMCS12_REVISION. */
- set_default_vmx_state(state, state_sz);
- set_revision_id_for_vmcs12(state, 0);
- test_nested_state_expect_einval(vm, state);
-
- /*
- * Test that if we leave nesting the state reflects that when we get
- * it again.
- */
- set_default_vmx_state(state, state_sz);
- state->hdr.vmx.vmxon_pa = -1ull;
- state->hdr.vmx.vmcs12_pa = -1ull;
- state->flags = 0;
- test_nested_state(vm, state);
- vcpu_nested_state_get(vm, VCPU_ID, state);
- TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
- "Size must be between %ld and %d. The size returned was %d.",
- sizeof(*state), state_sz, state->size);
- TEST_ASSERT(state->hdr.vmx.vmxon_pa == -1ull, "vmxon_pa must be -1ull.");
- TEST_ASSERT(state->hdr.vmx.vmcs12_pa == -1ull, "vmcs_pa must be -1ull.");
-
- free(state);
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_vm *vm;
- struct kvm_nested_state state;
-
- have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
-
- if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- print_skip("KVM_CAP_NESTED_STATE not available");
- exit(KSFT_SKIP);
- }
-
- /*
- * AMD currently does not implement set_nested_state, so for now we
- * just early out.
- */
- nested_vmx_check_supported();
-
- vm = vm_create_default(VCPU_ID, 0, 0);
-
- /* Passing a NULL kvm_nested_state causes an EFAULT. */
- test_nested_state_expect_efault(vm, NULL);
-
- /* 'size' cannot be smaller than sizeof(kvm_nested_state). */
- set_default_state(&state);
- state.size = 0;
- test_nested_state_expect_einval(vm, &state);
-
- /*
- * Setting the flags 0xf fails the flags check. The only flags that
- * can be used are:
- * KVM_STATE_NESTED_GUEST_MODE
- * KVM_STATE_NESTED_RUN_PENDING
- * KVM_STATE_NESTED_EVMCS
- */
- set_default_state(&state);
- state.flags = 0xf;
- test_nested_state_expect_einval(vm, &state);
-
- /*
- * If KVM_STATE_NESTED_RUN_PENDING is set then
- * KVM_STATE_NESTED_GUEST_MODE has to be set as well.
- */
- set_default_state(&state);
- state.flags = KVM_STATE_NESTED_RUN_PENDING;
- test_nested_state_expect_einval(vm, &state);
-
- test_vmx_nested_state(vm);
-
- kvm_vm_free(vm);
- return 0;
-}
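
The helpers in the file above wrap the raw KVM_SET_NESTED_STATE ioctl, which takes a struct kvm_nested_state header followed by room for the vmcs12 blob and rejects malformed input with EINVAL or EFAULT. The following is a rough sketch of the underlying call pattern, assuming an already-open vCPU file descriptor; vcpu_fd and the function name are placeholders, not selftest API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Minimal "leave nested mode" state: vmxon_pa/vmcs12_pa of -1ull, no flags. */
static int clear_nested_state(int vcpu_fd)
{
	size_t sz = sizeof(struct kvm_nested_state) + getpagesize();
	struct kvm_nested_state *state = calloc(1, sz);
	int ret;

	if (!state)
		return -1;

	state->format = 0;			/* 0 = VMX, 1 = SVM */
	state->size = sz;
	state->flags = 0;
	state->hdr.vmx.vmxon_pa = -1ull;	/* not in VMX operation */
	state->hdr.vmx.vmcs12_pa = -1ull;

	ret = ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
	if (ret)
		fprintf(stderr, "KVM_SET_NESTED_STATE: %s\n", strerror(errno));

	free(state);
	return ret;
}
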
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
deleted file mode 100644
index fbe8417cbc2c..000000000000
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ /dev/null
@@ -1,168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * vmx_tsc_adjust_test
- *
- * Copyright (C) 2018, Google LLC.
- *
- * IA32_TSC_ADJUST test
- *
- * According to the SDM, "if an execution of WRMSR to the
- * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
- * the logical processor also adds (or subtracts) value X from the
- * IA32_TSC_ADJUST MSR."
- *
- * Note that when L1 doesn't intercept writes to IA32_TSC, a
- * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
- * value.
- *
- * This test verifies that this unusual case is handled correctly.
- */
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "processor.h"
-#include "vmx.h"
-
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include "kselftest.h"
-
-#ifndef MSR_IA32_TSC_ADJUST
-#define MSR_IA32_TSC_ADJUST 0x3b
-#endif
-
-#define PAGE_SIZE 4096
-#define VCPU_ID 5
-
-#define TSC_ADJUST_VALUE (1ll << 32)
-#define TSC_OFFSET_VALUE -(1ll << 48)
-
-enum {
- PORT_ABORT = 0x1000,
- PORT_REPORT,
- PORT_DONE,
-};
-
-enum {
- VMXON_PAGE = 0,
- VMCS_PAGE,
- MSR_BITMAP_PAGE,
-
- NUM_VMX_PAGES,
-};
-
-struct kvm_single_msr {
- struct kvm_msrs header;
- struct kvm_msr_entry entry;
-} __attribute__((packed));
-
-/* The virtual machine object. */
-static struct kvm_vm *vm;
-
-static void check_ia32_tsc_adjust(int64_t max)
-{
- int64_t adjust;
-
- adjust = rdmsr(MSR_IA32_TSC_ADJUST);
- GUEST_SYNC(adjust);
- GUEST_ASSERT(adjust <= max);
-}
-
-static void l2_guest_code(void)
-{
- uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
-
- wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
- check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
-
- /* Exit to L1 */
- __asm__ __volatile__("vmcall");
-}
-
-static void l1_guest_code(struct vmx_pages *vmx_pages)
-{
-#define L2_GUEST_STACK_SIZE 64
- unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
- uint32_t control;
- uintptr_t save_cr3;
-
- GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
- wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
- check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
-
- GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
- GUEST_ASSERT(load_vmcs(vmx_pages));
-
- /* Prepare the VMCS for L2 execution. */
- prepare_vmcs(vmx_pages, l2_guest_code,
- &l2_guest_stack[L2_GUEST_STACK_SIZE]);
- control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
- control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
- vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
- vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
-
- /* Jump into L2. First, test failure to load guest CR3. */
- save_cr3 = vmreadz(GUEST_CR3);
- vmwrite(GUEST_CR3, -1ull);
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
- (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
- check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
- vmwrite(GUEST_CR3, save_cr3);
-
- GUEST_ASSERT(!vmlaunch());
- GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
-
- check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
-
- GUEST_DONE();
-}
-
-static void report(int64_t val)
-{
- pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
- val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
-}
-
-int main(int argc, char *argv[])
-{
- vm_vaddr_t vmx_pages_gva;
-
- nested_vmx_check_supported();
-
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
- /* Allocate VMX pages and shared descriptors (vmx_pages). */
- vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
-
- for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct ucall uc;
-
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
- run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
- /* NOT REACHED */
- case UCALL_SYNC:
- report(uc.args[1]);
- break;
- case UCALL_DONE:
- goto done;
- default:
- TEST_FAIL("Unknown ucall %lu", uc.cmd);
- }
- }
-
- kvm_vm_free(vm);
-done:
- return 0;
-}
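
The SDM behaviour quoted at the top of the file can be read as an invariant: a WRMSR to IA32_TSC that moves the TSC by some delta moves IA32_TSC_ADJUST by the same delta. Below is a guest-side sketch of checking that invariant, assuming the same rdmsr()/wrmsr()/rdtsc()/GUEST_ASSERT() helpers the deleted guest code already uses; the function is illustrative, not part of the test.

/*
 * Write IA32_TSC so the TSC moves by roughly `delta` and check that
 * IA32_TSC_ADJUST moved with it.  Because the TSC keeps ticking between
 * the rdtsc() and the wrmsr(), the adjust delta can only be bounded from
 * above, mirroring check_ia32_tsc_adjust() in the deleted test.
 */
static void check_tsc_adjust_tracks_wrmsr(int64_t delta)
{
	int64_t adjust_before = rdmsr(MSR_IA32_TSC_ADJUST);

	wrmsr(MSR_IA32_TSC, rdtsc() + delta);

	GUEST_ASSERT((int64_t)rdmsr(MSR_IA32_TSC_ADJUST) <=
		     adjust_before + delta);
}
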
diff --git a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
deleted file mode 100644
index 3529376747c2..000000000000
--- a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2019, Google LLC.
- *
- * Tests for the IA32_XSS MSR.
- */
-
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <sys/ioctl.h>
-
-#include "test_util.h"
-#include "kvm_util.h"
-#include "vmx.h"
-
-#define VCPU_ID 1
-#define MSR_BITS 64
-
-#define X86_FEATURE_XSAVES (1<<3)
-
-bool is_supported_msr(u32 msr_index)
-{
- struct kvm_msr_list *list;
- bool found = false;
- int i;
-
- list = kvm_get_msr_index_list();
- for (i = 0; i < list->nmsrs; ++i) {
- if (list->indices[i] == msr_index) {
- found = true;
- break;
- }
- }
-
- free(list);
- return found;
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_cpuid_entry2 *entry;
- bool xss_supported = false;
- struct kvm_vm *vm;
- uint64_t xss_val;
- int i, r;
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, 0);
-
- if (kvm_get_cpuid_max_basic() >= 0xd) {
- entry = kvm_get_supported_cpuid_index(0xd, 1);
- xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES);
- }
- if (!xss_supported) {
- print_skip("IA32_XSS is not supported by the vCPU");
- exit(KSFT_SKIP);
- }
-
- xss_val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_XSS);
- TEST_ASSERT(xss_val == 0,
- "MSR_IA32_XSS should be initialized to zero\n");
-
- vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, xss_val);
- /*
- * At present, KVM only supports a guest IA32_XSS value of 0. Verify
- * that trying to set the guest IA32_XSS to an unsupported value fails.
- * Also, in the future when a non-zero value succeeds check that
- * IA32_XSS is in the KVM_GET_MSR_INDEX_LIST.
- */
- for (i = 0; i < MSR_BITS; ++i) {
- r = _vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, 1ull << i);
- TEST_ASSERT(r == 0 || is_supported_msr(MSR_IA32_XSS),
- "IA32_XSS was able to be set, but was not found in KVM_GET_MSR_INDEX_LIST.\n");
- }
-
- kvm_vm_free(vm);
-}
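
is_supported_msr() above relies on kvm_get_msr_index_list(), which sits on the KVM_GET_MSR_INDEX_LIST ioctl: called with nmsrs too small it fails with E2BIG and reports the required count, so the usual pattern is probe-then-allocate. A rough sketch of that raw pattern, assuming an open /dev/kvm descriptor; kvm_fd and the function name are placeholders.

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Probe the required count via E2BIG, then fetch the full index list. */
static struct kvm_msr_list *get_msr_index_list(int kvm_fd)
{
	struct kvm_msr_list probe;
	struct kvm_msr_list *list;

	probe.nmsrs = 0;
	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe) >= 0 || errno != E2BIG)
		return NULL;	/* expected to fail with E2BIG and fill nmsrs */

	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(list->indices[0]));
	if (!list)
		return NULL;

	list->nmsrs = probe.nmsrs;
	if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0) {
		free(list);
		return NULL;
	}

	return list;
}
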