author     Avi Kivity <avi@qumranet.com>  2007-09-14 20:26:06 +0300
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-09-14 13:59:55 -0700
commit     22d95b1282810f5af599ee292b3fc443aefbdad0 (patch)
tree       dafe050aef12ea4bbac4ffb77070d8ec416a0838 /drivers/kvm/mmu.c
parent     Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/mchehab/v4l-dvb (diff)
KVM: MMU: Fix rare oops on guest context switch
A guest context switch to an uncached cr3 can require allocation of shadow
pages, but we only recycle shadow pages in kvm_mmu_page_fault().

Move shadow page recycling to mmu_topup_memory_caches(), which is called
from both the page fault handler and from guest cr3 reload.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
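For orientation, here is a sketch of what mmu_topup_memory_caches() looks like once the first hunk below is applied. It is reconstructed from that hunk rather than quoted from the tree; the later hunks suggest kvm_mmu_free_some_pages() also becomes a thin wrapper around the renamed __kvm_mmu_free_some_pages(), presumably defined in a header outside this diff.

/*
 * Sketch of mmu_topup_memory_caches() after this patch, reconstructed from
 * the first hunk below; __mmu_topup_memory_caches(), kvm_arch_ops and the
 * kvm->lock spinlock are as found in drivers/kvm/mmu.c of this era.
 */
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	/* Try an atomic top-up first, since kvm->lock is held. */
	r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
	/*
	 * Recycle shadow pages if running low; with this patch that now
	 * happens on guest cr3 reload as well as on page faults.
	 */
	kvm_mmu_free_some_pages(vcpu);
	if (r < 0) {
		/* Atomic allocation failed: drop lock and vcpu, retry sleeping. */
		spin_unlock(&vcpu->kvm->lock);
		kvm_arch_ops->vcpu_put(vcpu);
		r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
		kvm_arch_ops->vcpu_load(vcpu);
		spin_lock(&vcpu->kvm->lock);
		/* The lock was dropped; recycle again before returning. */
		kvm_mmu_free_some_pages(vcpu);
	}
	return r;
}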
Diffstat (limited to '')
-rw-r--r--  drivers/kvm/mmu.c | 5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 1a87ba9d5156..23965aa5ee78 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -273,12 +273,14 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 	int r;
 
 	r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
+	kvm_mmu_free_some_pages(vcpu);
 	if (r < 0) {
 		spin_unlock(&vcpu->kvm->lock);
 		kvm_arch_ops->vcpu_put(vcpu);
 		r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
 		kvm_arch_ops->vcpu_load(vcpu);
 		spin_lock(&vcpu->kvm->lock);
+		kvm_mmu_free_some_pages(vcpu);
 	}
 	return r;
 }
@@ -1208,7 +1210,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
 }
 
-void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *page;
@@ -1218,7 +1220,6 @@ void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		kvm_mmu_zap_page(vcpu->kvm, page);
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {