author		Junaid Shahid <junaids@google.com>	2018-06-29 13:10:05 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2018-08-06 17:59:01 +0200
commit		faff87588d8bfd9e56e9203412f0bb80455da7b9 (patch)
tree		740ebc1243b63b7b2aafb9c058480395f35f2a41 /arch/x86/kvm
parent		kvm: x86: Skip shadow page resync on CR3 switch when indicated by guest (diff)
kvm: x86: Flush only affected TLB entries in kvm_mmu_invlpg*
kvm_mmu_invlpg() and kvm_mmu_invpcid_gva() only need to flush the TLB
entries for the specific guest virtual address, instead of flushing all
TLB entries associated with the VM.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
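The mechanism: instead of raising KVM_REQ_TLB_FLUSH (a full flush of the
VM's TLB entries), both paths now call a new per-address kvm_x86_ops
callback, tlb_flush_gva, which SVM and VMX implement below. The hook's
declaration lives in struct kvm_x86_ops (arch/x86/include/asm/kvm_host.h)
and is therefore outside the diffstat shown here. A minimal, self-contained
sketch of the dispatch pattern, with stand-in types rather than the
kernel's:

    #include <stdio.h>

    typedef unsigned long gva_t;
    struct vcpu;                    /* stand-in for struct kvm_vcpu */

    /* Stand-in for the relevant slice of struct kvm_x86_ops. */
    struct x86_ops {
            void (*tlb_flush)(struct vcpu *v, int invalidate_gpa);
            void (*tlb_flush_gva)(struct vcpu *v, gva_t gva); /* new hook */
    };

    static void demo_flush(struct vcpu *v, int invalidate_gpa)
    {
            (void)v;
            printf("full TLB flush (invalidate_gpa=%d)\n", invalidate_gpa);
    }

    static void demo_flush_gva(struct vcpu *v, gva_t gva)
    {
            (void)v;
            printf("flush only the entry for gva=%#lx\n", gva);
    }

    int main(void)
    {
            struct x86_ops ops = { demo_flush, demo_flush_gva };

            ops.tlb_flush(NULL, 1);                    /* old INVLPG path */
            ops.tlb_flush_gva(NULL, 0x7f0000001000ul); /* path after patch */
            return 0;
    }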
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	14
-rw-r--r--	arch/x86/kvm/svm.c	8
-rw-r--r--	arch/x86/kvm/vmx.c	28
3 files changed, 47 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9446a36a4ab7..f84c194e6db1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5226,6 +5226,10 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ /* INVLPG on a non-canonical address is a NOP according to the SDM. */
+ if (is_noncanonical_address(gva, vcpu))
+ return;
+
mmu->invlpg(vcpu, gva, mmu->root_hpa);
/*
@@ -5242,7 +5246,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
if (VALID_PAGE(mmu->prev_root.hpa))
mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ kvm_x86_ops->tlb_flush_gva(vcpu, gva);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5250,18 +5254,22 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ bool tlb_flush = false;
if (pcid == kvm_get_active_pcid(vcpu)) {
mmu->invlpg(vcpu, gva, mmu->root_hpa);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ tlb_flush = true;
}
if (VALID_PAGE(mmu->prev_root.hpa) &&
pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3)) {
mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ tlb_flush = true;
}
+ if (tlb_flush)
+ kvm_x86_ops->tlb_flush_gva(vcpu, gva);
+
++vcpu->stat.invlpg;
/*
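A note on the guard added at the top of kvm_mmu_invlpg(): the SDM specifies
that INVLPG on a non-canonical address is a no-op, so the emulation returns
early rather than invalidating anything. A hedged, stand-alone sketch of
what such a canonicality test looks like; the in-tree
is_noncanonical_address() consults the vCPU's virtual-address width, while
this version hard-codes the common 48-bit case:

    #include <stdbool.h>
    #include <stdint.h>

    /* An address is canonical when bits 63:48 are a sign-extension of
     * bit 47. Sign-extending from bit 47 and comparing with the original
     * detects anything non-canonical. */
    static bool noncanonical_48(uint64_t gva)
    {
            return (uint64_t)((int64_t)(gva << 16) >> 16) != gva;
    }

Note also that kvm_mmu_invpcid_gva() batches the hardware flush: it syncs
the shadow pages for whichever roots match the PCID, but issues at most one
tlb_flush_gva() call.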
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index be9931cad409..73e27a98456f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5434,6 +5434,13 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
svm->asid_generation--;
}
+static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ invlpga(gva, svm->vmcb->control.asid);
+}
+
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
@@ -7086,6 +7093,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.set_rflags = svm_set_rflags,
.tlb_flush = svm_flush_tlb,
+ .tlb_flush_gva = svm_flush_tlb_gva,
.run = svm_vcpu_run,
.handle_exit = handle_exit,
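On SVM the per-address flush maps directly onto hardware: INVLPGA
invalidates the TLB entry for the virtual address in rAX within the ASID in
ECX, so the host can drop one guest mapping without disturbing its own
translations. A hedged sketch of an invlpga() wrapper along the lines of
the in-tree helper (which may differ in detail); the .byte sequence is the
INVLPGA opcode:

    /* x86-64 only. INVLPGA rAX, ECX: flush the TLB entry for 'addr' in
     * ASID 'asid'. Encoded as raw bytes so it assembles on old
     * toolchains. */
    static inline void invlpga(unsigned long addr, unsigned int asid)
    {
            asm volatile(".byte 0x0f, 0x01, 0xdf"
                         : /* no outputs */
                         : "a" (addr), "c" (asid)
                         : "memory");
    }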
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b81210051133..5aea5af02386 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1992,6 +1992,19 @@ static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
__loaded_vmcs_clear, loaded_vmcs, 1);
}
+static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
+{
+ if (vpid == 0)
+ return true;
+
+ if (cpu_has_vmx_invvpid_individual_addr()) {
+ __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
+ return true;
+ }
+
+ return false;
+}
+
static inline void vpid_sync_vcpu_single(int vpid)
{
if (vpid == 0)
@@ -4833,6 +4846,20 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}
+static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+ int vpid = to_vmx(vcpu)->vpid;
+
+ if (!vpid_sync_vcpu_addr(vpid, addr))
+ vpid_sync_context(vpid);
+
+ /*
+ * If VPIDs are not supported or enabled, then the above is a no-op.
+ * But we don't really need a TLB flush in that case anyway, because
+ * each VM entry/exit includes an implicit flush when VPID is 0.
+ */
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -13603,6 +13630,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.set_rflags = vmx_set_rflags,
.tlb_flush = vmx_flush_tlb,
+ .tlb_flush_gva = vmx_flush_tlb_gva,
.run = vmx_vcpu_run,
.handle_exit = vmx_handle_exit,
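On VMX the equivalent capability is the individual-address variant of
INVVPID, which is an optional CPU feature; hence the fallback in
vmx_flush_tlb_gva() to vpid_sync_context() (a single-context flush) when
cpu_has_vmx_invvpid_individual_addr() reports it missing. INVVPID takes a
128-bit in-memory descriptor: the VPID in bits 15:0, reserved bits that
must be zero, and the linear address in the second quadword; only the
individual-address extent consults that address. A hedged sketch of the
descriptor and extent numbers per the Intel SDM (the kernel's
VMX_VPID_EXTENT_* constants correspond to these values):

    #include <stdint.h>

    /* INVVPID extents per the Intel SDM, vol. 3. */
    enum invvpid_extent {
            INVVPID_INDIVIDUAL_ADDR          = 0, /* one (VPID, GVA) pair */
            INVVPID_SINGLE_CONTEXT           = 1, /* all entries for a VPID */
            INVVPID_ALL_CONTEXTS             = 2, /* all VPIDs */
            INVVPID_SINGLE_CONTEXT_NONGLOBAL = 3, /* a VPID, keeping globals */
    };

    /* 128-bit INVVPID descriptor: VPID in bits 15:0, bits 63:16 reserved
     * (must be zero), linear address in the high quadword. */
    struct invvpid_desc {
            uint64_t vpid : 16;
            uint64_t rsvd : 48;
            uint64_t gva;
    };

When vpid is 0 (VPIDs disabled), vpid_sync_vcpu_addr() returns true without
flushing, which is safe because every VM entry/exit already flushes
implicitly in that mode, as the comment in vmx_flush_tlb_gva() notes.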