Diffstat
-rw-r--r-- | arch/x86/kvm/cpuid.h | 76 |
1 file changed, 27 insertions, 49 deletions
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 92bd7623175b..67d80aa72d50 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -67,41 +67,40 @@ static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
 	*reg = kvm_cpu_caps[leaf];
 }
 
-static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
-						     unsigned int x86_feature)
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
+					    unsigned int x86_feature)
 {
 	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
 	struct kvm_cpuid_entry2 *entry;
+	u32 *reg;
+
+	/*
+	 * XSAVES is a special snowflake. Due to lack of a dedicated intercept
+	 * on SVM, KVM must assume that XSAVES (and thus XRSTORS) is usable by
+	 * the guest if the host supports XSAVES and *XSAVE* is exposed to the
+	 * guest. Because the guest can execute XSAVES and XRSTORS, i.e. can
+	 * indirectly consume XSS, KVM must ensure XSS is zeroed when running
+	 * the guest, i.e. must set XSAVES in vCPU capabilities. But to reject
+	 * direct XSS reads and writes (to minimize the virtualization hole and
+	 * honor userspace's CPUID), KVM needs to check the raw guest CPUID,
+	 * not KVM's view of guest capabilities.
+	 *
+	 * For all other features, guest capabilities are accurate. Expand
+	 * this allowlist with extreme vigilance.
+	 */
+	BUILD_BUG_ON(x86_feature != X86_FEATURE_XSAVES);
 
 	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
 	if (!entry)
-		return NULL;
-
-	return __cpuid_entry_get_reg(entry, cpuid.reg);
-}
-
-static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
-					    unsigned int x86_feature)
-{
-	u32 *reg;
-
-	reg = guest_cpuid_get_register(vcpu, x86_feature);
+		return false;
+
+	reg = __cpuid_entry_get_reg(entry, cpuid.reg);
 	if (!reg)
 		return false;
 
 	return *reg & __feature_bit(x86_feature);
 }
 
-static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
-					      unsigned int x86_feature)
-{
-	u32 *reg;
-
-	reg = guest_cpuid_get_register(vcpu, x86_feature);
-	if (reg)
-		*reg &= ~__feature_bit(x86_feature);
-}
-
 static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.is_amd_compatible;
@@ -202,27 +201,6 @@ static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
 	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
 }
 
-enum kvm_governed_features {
-#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
-#include "governed_features.h"
-	KVM_NR_GOVERNED_FEATURES
-};
-
-static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
-{
-	switch (x86_feature) {
-#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
-#include "governed_features.h"
-	default:
-		return -1;
-	}
-}
-
-static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
-{
-	return kvm_governed_feature_index(x86_feature) >= 0;
-}
-
 static __always_inline void guest_cpu_cap_set(struct kvm_vcpu *vcpu,
 					      unsigned int x86_feature)
 {
@@ -267,17 +245,17 @@ static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr
 
 static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
 {
-	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
+	return (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_STIBP) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBRS) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_SSBD));
 }
 
 static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
 {
-	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
-		guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
+	return (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_AMD_IBPB) ||
+		guest_cpu_cap_has(vcpu, X86_FEATURE_SBPB));
 }
 
 #endif
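A note on the BUILD_BUG_ON() in the new guest_cpuid_has(): asserting on a function argument only works here because the helper is __always_inline and every caller passes a compile-time-constant X86_FEATURE_* value, so after inlining the condition folds to a constant and any query for a feature other than XSAVES fails the build instead of slipping through at runtime. Below is a minimal standalone sketch of that mechanism, not KVM code: feature_has(), the FEAT_* values and the local BUILD_BUG_ON() definition are invented for illustration, and it needs GCC or Clang with optimizations enabled so the call to the error-attributed function is eliminated on the legal path.

#include <stdbool.h>

/*
 * Minimal stand-in for the kernel's BUILD_BUG_ON(): declare a function
 * carrying the "error" attribute and call it only when the condition holds.
 * If the optimizer proves the condition false (e.g. after inlining a
 * constant argument), the call disappears and the build succeeds; otherwise
 * the compiler reports an error at the offending call site.
 */
#define BUILD_BUG_ON(cond)						\
	do {								\
		extern void build_bug_on_failed(void)			\
			__attribute__((error("BUILD_BUG_ON failed")));	\
		if (cond)						\
			build_bug_on_failed();				\
	} while (0)

enum { FEAT_XSAVES, FEAT_SPEC_CTRL };	/* illustrative feature IDs */

/* Allowlist of exactly one feature, enforced at compile time per call site. */
static __attribute__((always_inline)) inline bool feature_has(unsigned int feature)
{
	BUILD_BUG_ON(feature != FEAT_XSAVES);

	return true;	/* stand-in for the real per-guest lookup */
}

int main(void)
{
	bool xsaves = feature_has(FEAT_XSAVES);		/* builds: condition folds away */
	/* bool ibrs = feature_has(FEAT_SPEC_CTRL); */	/* would fail to compile */

	return !xsaves;
}

Compiling with something like "cc -O2 sketch.c" succeeds; uncommenting the FEAT_SPEC_CTRL line reproduces the build failure the allowlist in guest_cpuid_has() is meant to cause.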