tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_apic_access_test
 *
 * Copyright (C) 2020, Google LLC.
 *
 * The first subtest verifies that an L2 guest can be launched with a
 * valid APIC-access address that is backed by a page of L1 physical
 * memory.
 *
 * The second subtest sets the APIC-access address to a (valid) L1
 * physical address that is not backed by memory. KVM can't handle
 * this situation, so resuming L2 should result in a KVM exit for
 * internal error (emulation). This is not an architectural
 * requirement; it is just a shortcoming of KVM. The internal error is
 * unfortunate, but it's better than what used to happen!
 */
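
/*
 * A minimal sketch of how this test is typically built and run from a
 * kernel tree (assumed invocation, not part of the test itself):
 *
 *   make -C tools/testing/selftests TARGETS=kvm
 *   ./tools/testing/selftests/kvm/x86_64/vmx_apic_access_test
 */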

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

static void l2_guest_code(void)
{
	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/*
	 * Prepare the VMCS for L2 execution.  The stack grows down, so the
	 * address one past the end of the array is L2's initial RSP.
	 */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
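	/*
	 * "Virtualize APIC accesses" is a secondary execution control, so
	 * the secondary controls must first be activated via the primary
	 * processor-based controls before that setting takes effect.
	 */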
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
	control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
	vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);

	/*
	 * Report the current APIC-access address to the host, then try to
	 * launch L2 with the memory-backed page.
	 */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	vmwrite(APIC_ACCESS_ADDR, high_gpa);

	/*
	 * Report the new address, then try to resume L2.  The host expects
	 * this run to fail with an emulation error, in which case the
	 * asserts below are never reached.
	 */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	unsigned long apic_access_addr = ~0ul;
	vm_vaddr_t vmx_pages_gva;
	unsigned long high_gpa;
	struct vmx_pages *vmx;
	bool done = false;

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	/*
	 * Pick a page-aligned GPA near the top of the guest physical
	 * address space that no memslot backs; this is the "valid but
	 * unbacked" APIC-access address for the second subtest.
	 */
	high_gpa = (vm->max_gfn - 1) << vm->page_shift;

	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
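	/*
	 * Allocate a backed page in guest memory for the APIC-access
	 * address; its GPA is stashed in vmx->apic_access_gpa for L1 to
	 * program into the VMCS.
	 */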
	prepare_virtualize_apic_accesses(vmx, vm);
	vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);

	while (!done) {
		volatile struct kvm_run *run = vcpu->run;
		struct ucall uc;

		vcpu_run(vcpu);
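		/*
		 * Once the guest has synced the unbacked address, the next
		 * run should fail with an emulation error instead of
		 * reaching another ucall.
		 */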
		if (apic_access_addr == high_gpa) {
			TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
			TEST_ASSERT(run->internal.suberror ==
				    KVM_INTERNAL_ERROR_EMULATION,
				    "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u",
				    run->internal.suberror);
			break;
		}
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			apic_access_addr = uc.args[1];
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
		}
	}
	kvm_vm_free(vm);
	return 0;
}