about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>2010-11-12 14:49:55 +0800
committerAvi Kivity <avi@redhat.com>2011-01-12 11:29:14 +0200
commitc4806acdcec020fe5bbb054ce9dc75aaecaf29dd (patch)
tree0fafeeb72efdb8985165fcab0a24ffcc5071653c /arch
parentKVM: MMU: support apf for nonpaing guest (diff)
downloadlinux-dev-c4806acdcec020fe5bbb054ce9dc75aaecaf29dd.tar.xz
linux-dev-c4806acdcec020fe5bbb054ce9dc75aaecaf29dd.zip
KVM: MMU: fix apf prefault if nested guest is enabled
If apf is generated in L2 guest and is completed in L1 guest, it will prefault this apf in L1 guest's mmu context.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/kvm/mmu.c1
-rw-r--r--arch/x86/kvm/x86.c3
3 files changed, 4 insertions, 1 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 116dac5e01d6..f1e8d5b99f5d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -600,6 +600,7 @@ struct kvm_x86_ops {
struct kvm_arch_async_pf {
u32 token;
gfn_t gfn;
+ bool direct_map;
};
extern struct kvm_x86_ops *kvm_x86_ops;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index db0fd555a5a2..ab100a7e0c49 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2609,6 +2609,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
struct kvm_arch_async_pf arch;
arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
arch.gfn = gfn;
+ arch.direct_map = vcpu->arch.mmu.direct_map;
return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9b875ff05410..c673e726fbdb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6168,7 +6168,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
int r;
- if (!vcpu->arch.mmu.direct_map || is_error_page(work->page))
+ if (!vcpu->arch.mmu.direct_map || !work->arch.direct_map ||
+ is_error_page(work->page))
return;
r = kvm_mmu_reload(vcpu);