/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_X86_VMX_COMMON_H
#define __KVM_X86_VMX_COMMON_H

#include <linux/kvm_host.h>

#include <asm/posted_intr.h>

#include "mmu.h"
union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
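
/* Per-vCPU state common to VMX and TDX guests. */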
struct vcpu_vt {
	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Used if this vCPU is waiting for PI notification wakeup. */
	struct list_head pi_wakeup_list;

	union vmx_exit_reason exit_reason;

	unsigned long	exit_qualification;
	u32		exit_intr_info;

	/*
	 * If true, guest state has been loaded into hardware, and host state
	 * saved into vcpu_{vt,vmx,tdx}. If false, host state is loaded into
	 * hardware.
	 */
	bool		guest_state_loaded;

#ifdef CONFIG_X86_64
	u64		msr_host_kernel_gs_base;
#endif
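
	/*
	 * Host value of MSR_IA32_DEBUGCTLMSR, saved when the vCPU is loaded
	 * and restored after VM-exit (which clears DEBUGCTL).
	 */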
	unsigned long	host_debugctlmsr;
};
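
/*
 * is_td() and is_td_vcpu() compile to constant false when TDX support is
 * not built in, letting the compiler elide TDX-only paths entirely.
 */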
#ifdef CONFIG_KVM_INTEL_TDX

static __always_inline bool is_td(struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static __always_inline bool is_td_vcpu(struct kvm_vcpu *vcpu)
{
	return is_td(vcpu->kvm);
}

#else

static inline bool is_td(struct kvm *kvm) { return false; }
static inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }

#endif
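
/*
 * A TDX guest's GPA space is split by the shared bit: GPAs with the shared
 * bit set fall in KVM's direct (shared) range, everything else is private
 * to the TD.
 */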
static inline bool vt_is_tdx_private_gpa(struct kvm *kvm, gpa_t gpa)
{
	/* For TDX the direct mask is the shared mask. */
	return !kvm_is_addr_direct(kvm, gpa);
}
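
/*
 * Translate the EPT-violation exit qualification (as read from the VMCS by
 * the caller) into a PFERR_* error code and forward the fault to the common
 * MMU code.
 */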
static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
					     unsigned long exit_qualification)
{
	u64 error_code;

	/* Is it a read fault? */
	error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
		     ? PFERR_USER_MASK : 0;
	/* Is it a write fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
		      ? PFERR_WRITE_MASK : 0;
	/* Is it a fetch fault? */
	error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
		      ? PFERR_FETCH_MASK : 0;
	/* Is the EPT page table entry present? */
	error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
		      ? PFERR_PRESENT_MASK : 0;

	if (exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
		error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) ?
			      PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

	if (vt_is_tdx_private_gpa(vcpu->kvm, gpa))
		error_code |= PFERR_PRIVATE_ACCESS;

	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}

#endif /* __KVM_X86_VMX_COMMON_H */