author    Sean Christopherson <seanjc@google.com>  2021-06-22 10:57:37 -0700
committer Paolo Bonzini <pbonzini@redhat.com>      2021-06-24 18:00:47 -0400
commit    fdaa293598f908adb945001dabb305225144e183 (patch)
tree      b12ff32bef0ae8548334e55203f436de8c3a7fd2 /arch
parent    KVM: x86/mmu: Drop redundant rsvd bits reset for nested NPT (diff)
KVM: x86/mmu: Get CR0.WP from MMU, not vCPU, in shadow page fault
Use the current MMU instead of vCPU state to query CR0.WP when handling
a page fault. In the nested NPT case, the current CR0.WP reflects L2,
whereas the page fault is shadowing L1's NPT. Practically speaking,
this is a nop, as NPT walks are always user faults, but fix it up for
consistency.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-53-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
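For reference, here are the two helpers side by side: the removed vCPU-based
check (verbatim from the diff below) and a minimal sketch of the MMU-based
replacement. The body of is_cr0_wp() is an assumption; in the kernel it is
generated by a role-accessor macro in arch/x86/kvm/mmu/mmu.c, and the exact
field path (mmu_role.base.cr0_wp) may differ.

/* Removed helper: reads CR0.WP from live vCPU state, which reflects
 * L2 while a nested guest is running. */
static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/* Replacement accessor, sketched under the assumption that CR0.WP is
 * cached in the MMU role; the fault-handling MMU's role tracks the
 * paging mode being shadowed (L1's NPT in the nested case), not the
 * vCPU's current CR0. */
static inline bool is_cr0_wp(struct kvm_mmu *mmu)
{
	return !!mmu->mmu_role.base.cr0_wp;
}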
Diffstat (limited to 'arch')
-rw-r--r-- arch/x86/kvm/mmu.h             | 5 -----
-rw-r--r-- arch/x86/kvm/mmu/paging_tmpl.h | 5 ++---
2 files changed, 2 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 62844bacd13f..83e6c6965f1e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -165,11 +165,6 @@ static inline bool is_writable_pte(unsigned long pte)
 	return pte & PT_WRITABLE_MASK;
 }
 
-static inline bool is_write_protection(struct kvm_vcpu *vcpu)
-{
-	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 75c3fe966e81..2f5a0e8d05ed 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -795,7 +795,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 	bool self_changed = false;
 
 	if (!(walker->pte_access & ACC_WRITE_MASK ||
-	      (!is_write_protection(vcpu) && !user_fault)))
+	      (!is_cr0_wp(vcpu->arch.mmu) && !user_fault)))
 		return false;
 
 	for (level = walker->level; level <= walker->max_level; level++) {
@@ -893,8 +893,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	 * we will cache the incorrect access into mmio spte.
 	 */
 	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
-	    !is_write_protection(vcpu) && !user_fault &&
-	    !is_noslot_pfn(pfn)) {
+	    !is_cr0_wp(vcpu->arch.mmu) && !user_fault && !is_noslot_pfn(pfn)) {
 		walker.pte_access |= ACC_WRITE_MASK;
 		walker.pte_access &= ~ACC_USER_MASK;
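The second hunk encodes the architectural rule that, when CR0.WP=0,
supervisor-mode writes are permitted even through PTEs that lack write
permission; KVM therefore grants write access on the shadow PTE but strips
user access so user-mode accesses cannot sneak through the widened mapping.
A self-contained sketch of that decision, with a hypothetical helper name
and illustrative flag values (the kernel additionally requires that the pfn
maps a real memslot, via !is_noslot_pfn()):

#include <stdbool.h>

/* Simplified model of the check above: decide whether a supervisor
 * write may be granted through a PTE that lacks write permission.
 * With CR0.WP=0, x86 lets supervisor writes ignore the PTE's
 * write-protect bit, so write access is granted, but user access must
 * be dropped, since user-mode accesses would still be denied. */
static bool grant_supervisor_write(unsigned int *pte_access, bool write_fault,
				   bool user_fault, bool cr0_wp)
{
	const unsigned int ACC_WRITE = 1u << 1;	/* illustrative values */
	const unsigned int ACC_USER  = 1u << 2;

	if (!write_fault || (*pte_access & ACC_WRITE) || user_fault || cr0_wp)
		return false;

	*pte_access |= ACC_WRITE;	/* allow the supervisor write */
	*pte_access &= ~ACC_USER;	/* but not user access via this SPTE */
	return true;
}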