Diffstat (limited to 'arch/x86/kvm/vmx/vmx.h')
 -rw-r--r--  arch/x86/kvm/vmx/vmx.h | 124
 1 file changed, 49 insertions(+), 75 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 61128b48c503..82d0bc3a4d52 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -109,14 +109,21 @@ struct nested_vmx {
* to guest memory during VM exit.
*/
struct vmcs12 *cached_shadow_vmcs12;
+
/*
* Indicates if the shadow vmcs or enlightened vmcs must be updated
* with the data held by struct vmcs12.
*/
- bool need_vmcs12_sync;
+ bool need_vmcs12_to_shadow_sync;
bool dirty_vmcs12;
/*
+ * Indicates lazily loaded guest state has not yet been decached from
+ * vmcs02.
+ */
+ bool need_sync_vmcs02_to_vmcs12_rare;
+
+ /*
* vmcs02 has been initialized, i.e. state that is constant for
* vmcs02 has been written to the backing VMCS. Initialization
* is delayed until L1 actually attempts to run a nested VM.
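
Editor's note, for orientation: a minimal sketch of how a flag such as need_vmcs12_to_shadow_sync is typically consumed before re-entering the guest. The helper name nested_sync_vmcs12_to_shadow() and the call site are assumptions for illustration; they are not part of this header.

/*
 * Illustrative only: one plausible consumer of need_vmcs12_to_shadow_sync.
 * nested_sync_vmcs12_to_shadow() is a hypothetical helper name here.
 */
static void example_sync_before_entry(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Flush vmcs12 into the shadow/enlightened VMCS only when flagged. */
	if (vmx->nested.need_vmcs12_to_shadow_sync)
		nested_sync_vmcs12_to_shadow(vcpu);
}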
@@ -180,14 +187,24 @@ struct vcpu_vmx {
struct kvm_vcpu vcpu;
u8 fail;
u8 msr_bitmap_mode;
+
+ /*
+ * If true, host state has been stored in vmx->loaded_vmcs for
+ * the CPU registers that only need to be switched when transitioning
+ * to/from the kernel, and the registers have been loaded with guest
+ * values. If false, host state is loaded in the CPU registers
+ * and vmx->loaded_vmcs->host_state is invalid.
+ */
+ bool guest_state_loaded;
+
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
+
struct shared_msr_entry *guest_msrs;
int nmsrs;
int save_nmsrs;
- bool guest_msrs_dirty;
- unsigned long host_idt_base;
+ bool guest_msrs_ready;
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
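
Editor's note: a reduced sketch of the bookkeeping the guest_state_loaded comment describes, mirroring what vmx_prepare_switch_to_guest() (declared further down) and its host-side counterpart are expected to do. The example_* names and the elided bodies are assumptions, not code from this patch.

/* Reduced to the flag handling only; the real switch code does much more. */
static void example_switch_to_guest_state(struct vcpu_vmx *vmx)
{
	if (vmx->guest_state_loaded)
		return;				/* lazy: host state already saved */

	/* ... save host segments/bases into vmx->loaded_vmcs->host_state ... */
	vmx->guest_state_loaded = true;
}

static void example_switch_to_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->guest_state_loaded)
		return;				/* host values already live in the CPU */

	/* ... restore host state from vmx->loaded_vmcs->host_state ... */
	vmx->guest_state_loaded = false;
}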
@@ -195,21 +212,15 @@ struct vcpu_vmx {
u64 spec_ctrl;
- u32 vm_entry_controls_shadow;
- u32 vm_exit_controls_shadow;
u32 secondary_exec_control;
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
- * guest (L2), it points to a different VMCS. loaded_cpu_state points
- * to the VMCS whose state is loaded into the CPU registers that only
- * need to be switched when transitioning to/from the kernel; a NULL
- * value indicates that host state is loaded.
+ * guest (L2), it points to a different VMCS.
*/
struct loaded_vmcs vmcs01;
struct loaded_vmcs *loaded_vmcs;
- struct loaded_vmcs *loaded_cpu_state;
struct msr_autoload {
struct vmx_msrs guest;
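
Editor's note: the two per-vcpu shadow fields removed above reappear as a per-VMCS shadow; the accessors generated further down dereference vmx->loaded_vmcs->controls_shadow.<name>. Below is a sketch of the companion structure that implies, with field names taken from the BUILD_CONTROLS_SHADOW() users in this header; the actual definition lives in vmcs.h and may differ in detail.

/* Assumed shape of the per-VMCS controls shadow; see vmcs.h for the real one. */
struct vmcs_controls_shadow {
	u32 vm_entry;
	u32 vm_exit;
	u32 pin;
	u32 exec;
	u32 secondary_exec;
};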
@@ -260,8 +271,6 @@ struct vcpu_vmx {
unsigned long host_debugctlmsr;
- u64 msr_ia32_power_ctl;
-
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
@@ -292,12 +301,14 @@ struct kvm_vmx {
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
+void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
+void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
+ unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
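
Editor's note: the newly exported vmx_set_host_fs_gs() takes raw selectors and bases. Below is a hedged sketch of a caller; gathering the values with savesegment() and rdmsrl() is an assumption for illustration, not something this hunk shows.

/* Illustrative caller; how the values are gathered is an assumption. */
static void example_record_host_fs_gs(struct vcpu_vmx *vmx)
{
	u16 fs_sel, gs_sel;
	unsigned long fs_base, gs_base;

	savesegment(fs, fs_sel);
	savesegment(gs, gs_sel);
	rdmsrl(MSR_FS_BASE, fs_base);
	rdmsrl(MSR_GS_BASE, gs_base);

	vmx_set_host_fs_gs(&vmx->loaded_vmcs->host_state, fs_sel, gs_sel,
			   fs_base, gs_base);
}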
@@ -376,69 +387,31 @@ static inline u8 vmx_get_rvi(void)
return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
-static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
-{
- vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
-}
-
-static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
-{
- vmcs_write32(VM_ENTRY_CONTROLS, val);
- vmx->vm_entry_controls_shadow = val;
-}
-
-static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
-{
- if (vmx->vm_entry_controls_shadow != val)
- vm_entry_controls_init(vmx, val);
-}
-
-static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
-{
- return vmx->vm_entry_controls_shadow;
-}
-
-static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
-}
-
-static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
-}
-
-static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
-{
- vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
-}
-
-static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
-{
- vmcs_write32(VM_EXIT_CONTROLS, val);
- vmx->vm_exit_controls_shadow = val;
-}
-
-static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
-{
- if (vmx->vm_exit_controls_shadow != val)
- vm_exit_controls_init(vmx, val);
-}
-
-static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
-{
- return vmx->vm_exit_controls_shadow;
-}
-
-static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
-}
-
-static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+#define BUILD_CONTROLS_SHADOW(lname, uname) \
+static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
+{ \
+ if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
+ vmcs_write32(uname, val); \
+ vmx->loaded_vmcs->controls_shadow.lname = val; \
+ } \
+} \
+static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
+{ \
+ return vmx->loaded_vmcs->controls_shadow.lname; \
+} \
+static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
+{ \
+ lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
+} \
+static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
+{ \
+ lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}
+BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
+BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
+BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
+BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
+BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
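
Editor's note: for readers unfamiliar with the token-pasting trick, this is what one BUILD_CONTROLS_SHADOW() instantiation expands to (the set/get pair for vm_entry), followed by an illustrative call site; the specific control bit chosen in the example is arbitrary.

/* Expansion of BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS), set/get only: */
static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
	if (vmx->loaded_vmcs->controls_shadow.vm_entry != val) {
		vmcs_write32(VM_ENTRY_CONTROLS, val);
		vmx->loaded_vmcs->controls_shadow.vm_entry = val;
	}
}

static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
	return vmx->loaded_vmcs->controls_shadow.vm_entry;
}

/* Illustrative call site (the chosen bit is only an example): */
static inline void example_load_debugctl_on_entry(struct vcpu_vmx *vmx)
{
	vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_DEBUG_CONTROLS);
}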
@@ -468,6 +441,7 @@ static inline u32 vmx_vmexit_ctrl(void)
}
u32 vmx_exec_control(struct vcpu_vmx *vmx);
+u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
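
Editor's note: vmx_pin_based_exec_ctrl() is now exported alongside vmx_exec_control(). A plausible pairing with the generated pin_controls_set() accessor is sketched below; the function name and call site are an illustration rather than something this hunk shows.

/* Illustrative only: recompute and (conditionally) rewrite the pin controls. */
static void example_refresh_pin_controls(struct vcpu_vmx *vmx)
{
	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
}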