Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	35
1 file changed, 2 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f72526e2f68..218b277bfda3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3466,7 +3466,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
/*
* Currently, fast page fault only works for direct mapping
* since the gfn is not stable for indirect shadow page. See
- * Documentation/virtual/kvm/locking.txt to get more detail.
+ * Documentation/virt/kvm/locking.txt to get more detail.
*/
fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
iterator.sptep, spte,
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
- struct kvm_mmu_page *sp;
- LIST_HEAD(invalid_list);
- unsigned long i;
- bool flush;
- gfn_t gfn;
-
- spin_lock(&kvm->mmu_lock);
-
- if (list_empty(&kvm->arch.active_mmu_pages))
- goto out_unlock;
-
- flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
- for (i = 0; i < slot->npages; i++) {
- gfn = slot->base_gfn + i;
-
- for_each_valid_sp(kvm, sp, gfn) {
- if (sp->gfn != gfn)
- continue;
-
- kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
- }
- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
- flush = false;
- cond_resched_lock(&kvm->mmu_lock);
- }
- }
- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
- spin_unlock(&kvm->mmu_lock);
+ kvm_mmu_zap_all(kvm);
}

void kvm_mmu_init_vm(struct kvm *kvm)
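
For reference, this is how the page-track notifier callback reads once the second hunk above is applied; it is simply the post-patch form shown in the diff, relying on the kvm_mmu_zap_all() helper that already exists in arch/x86/kvm/mmu.c:

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	/* Zap every shadow page in the VM instead of walking the slot's
	 * gfn range and zapping matching shadow pages one by one. */
	kvm_mmu_zap_all(kvm);
}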