 arch/x86/kvm/cpuid.c   | 46
 arch/x86/kvm/cpuid.h   | 24
 arch/x86/kvm/svm/svm.c | 28
 arch/x86/kvm/vmx/vmx.c |  8
 4 files changed, 85 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 29a9562031f4..ad8e33f35553 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -354,13 +354,56 @@ static bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx);
}
+/*
+ * This isn't truly "unsafe", but except for the cpu_caps initialization code,
+ * all register lookups should use __cpuid_entry_get_reg(), which provides
+ * compile-time validation of the input.
+ */
+static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg)
+{
+ switch (reg) {
+ case CPUID_EAX:
+ return entry->eax;
+ case CPUID_EBX:
+ return entry->ebx;
+ case CPUID_ECX:
+ return entry->ecx;
+ case CPUID_EDX:
+ return entry->edx;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *best;
+ struct kvm_cpuid_entry2 *entry;
bool allow_gbpages;
+ int i;
memset(vcpu->arch.cpu_caps, 0, sizeof(vcpu->arch.cpu_caps));
+ BUILD_BUG_ON(ARRAY_SIZE(reverse_cpuid) != NR_KVM_CPU_CAPS);
+
+ /*
+ * Reset guest capabilities to userspace's guest CPUID definition, i.e.
+ * honor userspace's definition for features that don't require KVM or
+ * hardware management/support (or that KVM simply doesn't care about).
+ */
+ for (i = 0; i < NR_KVM_CPU_CAPS; i++) {
+ const struct cpuid_reg cpuid = reverse_cpuid[i];
+
+ if (!cpuid.function)
+ continue;
+
+ entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
+ if (!entry)
+ continue;
+
+ vcpu->arch.cpu_caps[i] = cpuid_get_reg_unsafe(entry, cpuid.reg);
+ }
kvm_update_cpuid_runtime(vcpu);
@@ -377,8 +420,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
- if (allow_gbpages)
- guest_cpu_cap_set(vcpu, X86_FEATURE_GBPAGES);
+ guest_cpu_cap_change(vcpu, X86_FEATURE_GBPAGES, allow_gbpages);
best = kvm_find_cpuid_entry(vcpu, 1);
if (best && apic) {
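The comment on cpuid_get_reg_unsafe() points at __cpuid_entry_get_reg() as the lookup everything else should use. The difference is where bad input is caught: that helper is __always_inline with a compile-time-constant 'reg' at every call site, so an invalid register fails the build, whereas here 'reg' arrives at runtime from a table walk and the best available backstop is WARN_ON_ONCE(). A paraphrased sketch of the checked variant (not the exact kernel source):

static __always_inline u32 *cpuid_entry_get_reg_sketch(struct kvm_cpuid_entry2 *entry,
						       u32 reg)
{
	switch (reg) {
	case CPUID_EAX: return &entry->eax;
	case CPUID_EBX: return &entry->ebx;
	case CPUID_ECX: return &entry->ecx;
	case CPUID_EDX: return &entry->edx;
	default:
		/* For a valid constant 'reg' this branch is dead code and is
		 * eliminated; an invalid value leaves it reachable and the
		 * build fails instead of the lookup returning garbage. */
		BUILD_BUG();
		return NULL;
	}
}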
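The seeding loop relies on the reverse_cpuid table, which maps each cpu_caps word to the CPUID leaf, sub-leaf, and output register it shadows. As a rough model of the entry type consumed above (presumed to match KVM's reverse_cpuid.h; treat it as a sketch, not a quotation):

struct cpuid_reg {
	u32 function;	/* CPUID leaf, i.e. the EAX input */
	u32 index;	/* CPUID sub-leaf, i.e. the ECX input */
	int reg;	/* output register: CPUID_EAX/EBX/ECX/EDX */
};

A zero function presumably marks a word with no architectural CPUID leaf behind it, which is why the loop skips such entries rather than seeding them from a bogus leaf-0 lookup; those words keep the all-zero value from the preceding memset().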
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 486cb1429998..0a3b27547d2f 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -246,11 +246,29 @@ static __always_inline void guest_cpu_cap_set(struct kvm_vcpu *vcpu,
vcpu->arch.cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}
-static __always_inline void guest_cpu_cap_check_and_set(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_clear(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
{
- if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
+
+ vcpu->arch.cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void guest_cpu_cap_change(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature,
+ bool guest_has_cap)
+{
+ if (guest_has_cap)
guest_cpu_cap_set(vcpu, x86_feature);
+ else
+ guest_cpu_cap_clear(vcpu, x86_feature);
+}
+
+static __always_inline void guest_cpu_cap_constrain(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
+{
+ if (!kvm_cpu_cap_has(x86_feature))
+ guest_cpu_cap_clear(vcpu, x86_feature);
}
static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
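Taken together, the cpuid.h changes give vendor code two idioms against caps that now start life as a copy of userspace's CPUID: guest_cpu_cap_change() installs a fully computed verdict in either direction, while guest_cpu_cap_constrain() only narrows, clearing a userspace-enumerated bit when KVM itself lacks support. A hypothetical caller in the shape of the svm/vmx hooks below (illustrative only):

static void example_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/* Narrow: keep the bit userspace enumerated only if KVM has it too. */
	guest_cpu_cap_constrain(vcpu, X86_FEATURE_NRIPS);

	/* Override: force the bit to KVM's computed value either way. */
	guest_cpu_cap_change(vcpu, X86_FEATURE_GBPAGES,
			     tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
					   guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES));
}

Note that guest_cpu_cap_constrain() deliberately does not consult guest_cpuid_has(): the seeding loop in kvm_vcpu_after_set_cpuid() already folded guest CPUID in, which is exactly what lets it replace guest_cpu_cap_check_and_set().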
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f96c62a9d2c2..3b94cb6c2b7a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4401,27 +4401,29 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* XSS on VM-Enter/VM-Exit. Failure to do so would effectively give
* the guest read/write access to the host's XSS.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE) &&
- boot_cpu_has(X86_FEATURE_XSAVES) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
- guest_cpu_cap_set(vcpu, X86_FEATURE_XSAVES);
+ guest_cpu_cap_change(vcpu, X86_FEATURE_XSAVES,
+ boot_cpu_has(X86_FEATURE_XSAVE) &&
+ boot_cpu_has(X86_FEATURE_XSAVES) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVE));
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_NRIPS);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_LBRV);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_NRIPS);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_TSCRATEMSR);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_LBRV);
/*
* Intercept VMLOAD if the vCPU model is Intel in order to emulate that
* VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
* SVM on Intel is bonkers and extremely unlikely to work).
*/
- if (!guest_cpuid_is_intel_compatible(vcpu))
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
+ if (guest_cpuid_is_intel_compatible(vcpu))
+ guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
+ else
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VGIF);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VNMI);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_PAUSEFILTER);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_PFTHRESHOLD);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_VGIF);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_VNMI);
svm_recalc_instruction_intercepts(vcpu, svm);
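The V_VMSAVE_VMLOAD hunk is the one place the conversion is not mechanical: the old code could simply skip check_and_set() on Intel-compatible vCPU models because caps started out zeroed, but now the bit may have been seeded from guest CPUID and has to be cleared explicitly. A standalone model of the resulting flow (plain C; the names and bit position are invented for illustration):

#include <stdbool.h>
#include <stdint.h>

#define V_VMSAVE_VMLOAD_BIT	(UINT32_C(1) << 15)	/* illustrative position */

static uint32_t vmsave_vmload_caps(uint32_t seeded_caps, bool intel_compatible,
				   bool kvm_supported)
{
	uint32_t caps = seeded_caps;	/* copied from userspace CPUID earlier */

	if (intel_compatible)
		caps &= ~V_VMSAVE_VMLOAD_BIT;	/* explicit clear, new here */
	else if (!kvm_supported)
		caps &= ~V_VMSAVE_VMLOAD_BIT;	/* guest_cpu_cap_constrain() */

	return caps;
}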
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ccba522246c3..8b95ba323a17 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7830,10 +7830,12 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
if (boot_cpu_has(X86_FEATURE_XSAVE) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_XSAVES);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_XSAVES);
+ else
+ guest_cpu_cap_clear(vcpu, X86_FEATURE_XSAVES);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VMX);
- guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_LAM);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_VMX);
+ guest_cpu_cap_constrain(vcpu, X86_FEATURE_LAM);
vmx_setup_uret_msrs(vmx);
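The VMX XSAVES hunk follows the same seed-then-narrow reasoning: the new else branch matters because a userspace CPUID that enumerates XSAVES without XSAVE would otherwise leave the seeded bit set. A minimal standalone model of the decision (plain C; names invented):

#include <stdbool.h>

static bool vmx_guest_caps_xsaves(bool guest_cpuid_xsaves, bool guest_cpuid_xsave,
				  bool host_xsave, bool kvm_xsaves)
{
	bool cap = guest_cpuid_xsaves;	/* seeded from userspace CPUID */

	if (host_xsave && guest_cpuid_xsave)
		cap = cap && kvm_xsaves;	/* guest_cpu_cap_constrain() */
	else
		cap = false;		/* guest_cpu_cap_clear() */

	return cap;
}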