author		Sean Christopherson <sean.j.christopherson@intel.com>	2019-09-12 19:46:07 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-09-24 14:35:35 +0200
commit		fbb158cb88b6e4f8279707a1842ee0332f64b1a3 (patch)
tree		b37438e6dc9823c893a6b77416a3ef33b4abe920 /arch/x86/kvm/mmu.c
parent		KVM: x86/mmu: Revert "Revert "KVM: MMU: add tracepoint for kvm_mmu_invalidate_all_pages"" (diff)
KVM: x86/mmu: Revert "Revert "KVM: MMU: zap pages in batch""
Now that the fast invalidate mechanism has been reintroduced, restore the
performance tweaks for fast invalidation that existed prior to its removal.

Paraphrasing the original changelog:

  Zap at least 10 shadow pages before releasing mmu_lock to reduce the
  overhead associated with re-acquiring the lock.

  Note: "10" is an arbitrary number, speculated to be high enough so that
  a vCPU isn't stuck zapping obsolete pages for an extended period, but
  small enough so that other vCPUs aren't starved waiting for mmu_lock.

This reverts commit 43d2b14b105fb00b8864c7b0ee7043cc1cc4a969.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
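The pattern being restored here, batching work under a contended spinlock and
yielding only between batches, can be sketched generically as below. This is an
illustration of the technique, not the patch itself; process_one_item() and the
list/lock parameters are hypothetical stand-ins, while the locking primitives
are the real kernel APIs:

/*
 * Illustrative kernel-style sketch: process work items under a spinlock,
 * yielding the lock (and rescheduling) only after a full batch, so the
 * worker makes forward progress even when the lock is heavily contended.
 */
#define BATCH_SIZE	10

static void process_all(struct list_head *work, spinlock_t *lock)
{
	int batch = 0;

	spin_lock(lock);
	while (!list_empty(work)) {
		if (batch >= BATCH_SIZE &&
		    (need_resched() || spin_needbreak(lock))) {
			batch = 0;
			cond_resched_lock(lock);	/* drop, yield, re-acquire */
			continue;
		}
		batch += process_one_item(work);	/* hypothetical helper */
	}
	spin_unlock(lock);
}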
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	35
1 file changed, 9 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2ab0ac88d86e..3acc4b046d47 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5671,12 +5671,12 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-
+#define BATCH_ZAP_PAGES	10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
-	int ign;
+	int nr_zapped, batch = 0;
 
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
@@ -5689,28 +5689,6 @@ restart:
 			break;
 
 		/*
-		 * Do not repeatedly zap a root page to avoid unnecessary
-		 * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
-		 * progress:
-		 *    vcpu 0                        vcpu 1
-		 *                         call vcpu_enter_guest():
-		 *                            1): handle KVM_REQ_MMU_RELOAD
-		 *                                and require mmu-lock to
-		 *                                load mmu
-		 * repeat:
-		 *    1): zap root page and
-		 *        send KVM_REQ_MMU_RELOAD
-		 *
-		 *    2): if (cond_resched_lock(mmu-lock))
-		 *
-		 *                            2): hold mmu-lock and load mmu
-		 *
-		 *                            3): see KVM_REQ_MMU_RELOAD bit
-		 *                                on vcpu->requests is set
-		 *                                then return 1 to call
-		 *                                vcpu_enter_guest() again.
-		 *            goto repeat;
-		 *
 		 * Since we are reversely walking the list and the invalid
 		 * list will be moved to the head, skip the invalid page
 		 * can help us to avoid the infinity list walking.
@@ -5718,14 +5696,19 @@ restart:
 		if (sp->role.invalid)
 			continue;
 
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (batch >= BATCH_ZAP_PAGES &&
+		    (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
+			batch = 0;
 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
 			cond_resched_lock(&kvm->mmu_lock);
 			goto restart;
 		}
 
-		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
+					       &nr_zapped)) {
+			batch += nr_zapped;
 			goto restart;
+		}
 	}
 
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
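
For readability, this is approximately how the zap loop reads with the patch
applied, reassembled from the hunks above. Context that falls between or after
the hunks is elided or filled in by assumption (e.g. the continuation of the
list_for_each_entry_safe_reverse() arguments), so treat it as a sketch rather
than the exact upstream function:

#define BATCH_ZAP_PAGES	10
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int nr_zapped, batch = 0;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/* ... context between the hunks elided: the walk breaks out
		 * of the loop once it reaches a page that is not obsolete ... */

		/* Skip invalid pages; they are moved to the head of the list,
		 * and the walk is in reverse, so revisiting them could loop. */
		if (sp->role.invalid)
			continue;

		/* Yield mmu_lock only after zapping a full batch, so a vCPU
		 * is neither stuck zapping forever nor starving others. */
		if (batch >= BATCH_ZAP_PAGES &&
		    (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
			batch = 0;
			kvm_mmu_commit_zap_page(kvm, &invalid_list);
			cond_resched_lock(&kvm->mmu_lock);
			goto restart;
		}

		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
					       &nr_zapped)) {
			batch += nr_zapped;
			goto restart;
		}
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}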