author      Sean Christopherson <seanjc@google.com>    2024-08-02 13:39:00 -0700
committer   Sean Christopherson <seanjc@google.com>    2024-08-29 19:05:55 -0700
commit      1dc9cc1c4c230fbc6bf6322f150d4b81f712cfb9 (patch)
tree        7ec2b1f680dff853eb628a2c805774a50fc7dbab
parent      KVM: x86/mmu: Drop pointless "return" wrapper label in FNAME(fetch) (diff)
KVM: x86/mmu: Reword a misleading comment about checking gpte_changed()
Rewrite the comment in FNAME(fetch) to explain why KVM needs to check that
the gPTE is still fresh before continuing the shadow page walk, even if KVM
already has a linked shadow page for the gPTE in question.

No functional change intended.

Link: https://lore.kernel.org/r/20240802203900.348808-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
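The race the reworded comment describes can be sketched outside the kernel.
The following is a minimal userspace illustration, not the kernel's
FNAME(gpte_changed) implementation: the types and the one-entry "walker" are
simplified assumptions. The idea is the same, though: the fault handler caches
a gPTE during the lockless guest walk, another vCPU may rewrite that gPTE
before mmu_lock is acquired, so the cached value must be re-checked against
guest memory and the fault retried if it changed.

/*
 * Standalone illustration (not kernel code) of why the cached gPTE must be
 * re-checked under mmu_lock: the value seen by the lockless walker may have
 * been changed by another vCPU before the lock was taken.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpte_t;

struct guest_walker {
	gpte_t cached_gpte;      /* gPTE observed during the lockless walk */
	volatile gpte_t *gptep;  /* location of the gPTE in guest memory */
};

/* Rough analogue of FNAME(gpte_changed): true if the cached gPTE is stale. */
static bool gpte_changed(const struct guest_walker *gw)
{
	return *gw->gptep != gw->cached_gpte;
}

int main(void)
{
	gpte_t guest_pte = 0x1000 | 0x1;                  /* present gPTE */
	struct guest_walker gw = { guest_pte, &guest_pte };

	/* Another vCPU rewrites the gPTE between the fault and mmu_lock. */
	guest_pte = 0x2000 | 0x1;

	/* The fetch path must notice the stale snapshot and retry. */
	puts(gpte_changed(&gw) ? "RET_PF_RETRY" : "continue the walk");
	return 0;
}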
Diffstat (limited to '')
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h  10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 480c54122991..405bd7ceee2a 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -695,8 +695,14 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 			return RET_PF_RETRY;
 
 		/*
-		 * Verify that the gpte in the page we've just write
-		 * protected is still there.
+		 * Verify that the gpte in the page, which is now either
+		 * write-protected or unsync, wasn't modified between the fault
+		 * and acquiring mmu_lock.  This needs to be done even when
+		 * reusing an existing shadow page to ensure the information
+		 * gathered by the walker matches the information stored in the
+		 * shadow page (which could have been modified by a different
+		 * vCPU even if the page was already linked).  Holding mmu_lock
+		 * prevents the shadow page from changing after this point.
 		 */
 		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
 			return RET_PF_RETRY;