path: root/arch/x86
author		Yu Zhao <yuzhao@google.com>	2016-03-30 13:38:09 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-04-01 12:10:10 +0200
commit	14f4760562e41d50817d56b42c821d70ad10b483 (patch)
tree	a5883be4834d815db85463c1218b20da509b1830 /arch/x86
parent	KVM: x86: reduce default value of halt_poll_ns parameter (diff)
kvm: set page dirty only if page has been writable
In the absence of a shadow dirty mask, there is no need to set the page
dirty if the page has never been writable. This is a tiny optimization,
but a good one for anyone who cares about dirty page tracking.

Signed-off-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
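To make the reasoning concrete, here is a minimal standalone sketch of
the dirty-inference check introduced by this patch. It is not the
kernel code: spte_was_dirtied is an invented name, and PT_WRITABLE_MASK
is hard-coded to bit 1 as in an x86 PTE. The idea is that when the CPU
offers no hardware dirty bit (shadow_dirty_mask == 0), the writable bit
is the only evidence the guest could ever have dirtied the page, so a
never-writable spte need not mark the page dirty.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)	/* bit 1 of an x86 PTE */

/*
 * Hypothetical stand-in for the patched condition: if a hardware
 * dirty bit exists, trust it; otherwise fall back to the writable
 * bit, since a page that was never writable cannot have been
 * dirtied by the guest.
 */
static bool spte_was_dirtied(uint64_t spte, uint64_t shadow_dirty_mask)
{
	return spte & (shadow_dirty_mask ? shadow_dirty_mask
					 : PT_WRITABLE_MASK);
}

int main(void)
{
	/* No hardware dirty bit, spte never writable: not dirty. */
	printf("%d\n", spte_was_dirtied(0x0, 0));		/* 0 */
	/* No hardware dirty bit, spte writable: assume dirty. */
	printf("%d\n", spte_was_dirtied(PT_WRITABLE_MASK, 0));	/* 1 */
	return 0;
}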
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	|	12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70e95d097ef1..1ff4dbb73fb7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	    !is_writable_pte(new_spte))
 		ret = true;
 
-	if (!shadow_accessed_mask)
+	if (!shadow_accessed_mask) {
+		/*
+		 * We don't set page dirty when dropping non-writable spte.
+		 * So do it now if the new spte is becoming non-writable.
+		 */
+		if (ret)
+			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 		return ret;
+	}
 
 	/*
 	 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+	if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+			PT_WRITABLE_MASK))
 		kvm_set_pfn_dirty(pfn);
 	return 1;
 }
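The first hunk is the complementary case and can be sketched the same
way. Again a minimal, hypothetical model, not the kernel function:
update_spte_sketch and mark_pfn_dirty are invented names, and the real
mmu_spte_update does considerably more (atomic spte updates, TLB flush
decisions). The point it illustrates: when there is no hardware
accessed bit, mmu_spte_update returns early, so the patch records
dirtiness there whenever a previously writable spte is being made
non-writable, because the writable bit it would otherwise consult
later is about to disappear.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK (1ULL << 1)

static void mark_pfn_dirty(uint64_t spte)
{
	/* Stub standing in for kvm_set_pfn_dirty(spte_to_pfn(spte)). */
	printf("pfn of spte %#llx marked dirty\n",
	       (unsigned long long)spte);
}

/* Returns true when the caller must flush remote TLBs. */
static bool update_spte_sketch(uint64_t *sptep, uint64_t new_spte,
			       uint64_t shadow_accessed_mask)
{
	uint64_t old_spte = *sptep;
	bool ret = false;

	*sptep = new_spte;

	/* Dropping write permission may hide in-flight guest writes. */
	if ((old_spte & PT_WRITABLE_MASK) && !(new_spte & PT_WRITABLE_MASK))
		ret = true;

	if (!shadow_accessed_mask) {
		/*
		 * Early-return path: record dirtiness now, since the
		 * writable bit checked by the second hunk is going away.
		 */
		if (ret)
			mark_pfn_dirty(old_spte);
		return ret;
	}

	/* ... accessed/dirty bit handling elided ... */
	return ret;
}

int main(void)
{
	uint64_t spte = PT_WRITABLE_MASK;

	/* Writable -> non-writable with no accessed mask: marks dirty. */
	update_spte_sketch(&spte, 0, 0);
	return 0;
}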