author     Sean Christopherson <sean.j.christopherson@intel.com>  2019-12-06 15:57:22 -0800
committer  Paolo Bonzini <pbonzini@redhat.com>  2020-01-08 18:16:05 +0100
commit     2f57b7051fe8fa680b7c38c7e98094fa3ba3ba8b (patch)
tree       5e91f36b61e66fabb39e271a16cbb5d73b478adf /arch/x86/kvm/mmu/mmu.c
parent     KVM: x86/mmu: Incorporate guest's page level into max level for shadow MMU (diff)
KVM: x86/mmu: Persist gfn_lpage_is_disallowed() to max_level
Persist the max page level calculated via gfn_lpage_is_disallowed() to the max level "returned" by mapping_level() so that it's naturally taken into account by the max level check that conditions calling transparent_hugepage_adjust().

Drop the gfn_lpage_is_disallowed() check in thp_adjust() as it's now handled by mapping_level() and its callers.

Add a comment to document the behavior of host_mapping_level() and its interaction with max level and transparent huge pages.

Note, transferring the gfn_lpage_is_disallowed() check from thp_adjust() to mapping_level() superficially affects how changes to a memslot's disallow_lpage count will be handled due to thp_adjust() being run while holding mmu_lock. In the more common case where a different vCPU increments the count via account_shadowed(), gfn_lpage_is_disallowed() is rechecked by set_spte() to ensure a writable large page isn't created. In the less common case where the count is decremented to zero due to all shadow pages in the memslot being zapped, THP behavior now matches hugetlbfs behavior in the sense that a small page will be created when a large page could be used if the count reaches zero in the minuscule window between mapping_level() and acquiring mmu_lock.

Lastly, the new THP behavior also follows hugetlbfs behavior in the absurdly unlikely scenario of a memslot being moved such that the memslot's compatibility with respect to large pages changes, but without changing the validity of the gfn->pfn walk. I.e. if a memslot is moved between mapping_level() and snapshotting mmu_seq, it's theoretically possible to consume a stale disallow_lpage count. But, since KVM zaps all shadow pages when moving a memslot and forces all vCPUs to reload a new MMU, the inserted spte will always be thrown away prior to completing the memslot move, i.e. whether or not the spte accurately reflects disallow_lpage is irrelevant.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
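For reference, the reshaped mapping_level() reads as follows once the patch is applied. This is a condensed sketch assembled from the hunks below, not a verbatim copy of the file; the memslot lookup between the two hunks is elided and marked as such, and the early return for the unlikely() check is inferred from context.

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
			 int *max_levelp)
{
	int max_level = *max_levelp;
	struct kvm_memory_slot *slot;

	if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
		return PT_PAGE_TABLE_LEVEL;

	/* ... memslot lookup and validity checks elided, see the full diff ... */

	max_level = min(max_level, kvm_x86_ops->get_lpage_level());

	/* Walk down from max_level until a level the gfn allows is found. */
	for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
		if (!__mmu_gfn_lpage_is_disallowed(large_gfn, max_level, slot))
			break;
	}

	/* Persist the clamped level so callers can act on it. */
	*max_levelp = max_level;

	if (max_level == PT_PAGE_TABLE_LEVEL)
		return PT_PAGE_TABLE_LEVEL;

	/* host_mapping_level() ignores THP, so it is not fed back into max_level. */
	return host_mapping_level(vcpu->kvm, large_gfn);
}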
Diffstat
 arch/x86/kvm/mmu/mmu.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)
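The commit message's "max level check that conditions calling transparent_hugepage_adjust()" lives in the fault paths, which are not touched by this diff. The sketch below is a minimal, hypothetical illustration of that gating pattern; the wrapper function, its parameters, and the exact transparent_hugepage_adjust() argument list are assumptions for illustration, not code from this commit.

/*
 * Illustrative sketch only: names and signatures here are assumptions
 * used to show how the persisted max_level gates THP adjustment.
 */
static void example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn,
			    kvm_pfn_t *pfn, int *level, int max_level)
{
	*level = mapping_level(vcpu, gfn, &max_level);

	/*
	 * mapping_level() now writes the gfn_lpage_is_disallowed() result
	 * back through max_levelp, so a gfn that must stay 4K never reaches
	 * transparent_hugepage_adjust() at all.
	 */
	if (max_level > PT_PAGE_TABLE_LEVEL)
		transparent_hugepage_adjust(vcpu, gfn, pfn, level);
}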
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index daf41806243f..7cfacfe8548e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1330,7 +1330,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
int *max_levelp)
{
- int host_level, max_level = *max_levelp;
+ int max_level = *max_levelp;
struct kvm_memory_slot *slot;
if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
@@ -1342,18 +1342,27 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
return PT_PAGE_TABLE_LEVEL;
}
- host_level = host_mapping_level(vcpu->kvm, large_gfn);
-
- if (host_level == PT_PAGE_TABLE_LEVEL)
- return host_level;
-
- max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
+ max_level = min(max_level, kvm_x86_ops->get_lpage_level());
for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
if (!__mmu_gfn_lpage_is_disallowed(large_gfn, max_level, slot))
break;
}
- return max_level;
+ *max_levelp = max_level;
+
+ if (max_level == PT_PAGE_TABLE_LEVEL)
+ return PT_PAGE_TABLE_LEVEL;
+
+ /*
+ * Note, host_mapping_level() does *not* handle transparent huge pages.
+ * As suggested by "mapping", it reflects the page size established by
+ * the associated vma, if there is one, i.e. host_mapping_level() will
+ * return a huge page level if and only if a vma exists and the backing
+ * implementation for the vma uses huge pages, e.g. hugetlbfs and dax.
+ * So, do not propagate host_mapping_level() to max_level as KVM can
+ * still promote the guest mapping to a huge page in the THP case.
+ */
+ return host_mapping_level(vcpu->kvm, large_gfn);
}
/*
@@ -3424,8 +3433,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
*/
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
!kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
- PageTransCompoundMap(pfn_to_page(pfn)) &&
- !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
+ PageTransCompoundMap(pfn_to_page(pfn))) {
unsigned long mask;
/*
* mmu_notifier_retry was successful and we hold the