Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_radix.c')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c  36
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 2a2fad9a1c13..3cb0c9843d01 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -353,7 +353,13 @@ static struct kmem_cache *kvm_pmd_cache;
static pte_t *kvmppc_pte_alloc(void)
{
- return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ pte_t *pte;
+
+ pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ /* pmd_populate() will only reference _pa(pte). */
+ kmemleak_ignore(pte);
+
+ return pte;
}
static void kvmppc_pte_free(pte_t *ptep)
@@ -363,7 +369,13 @@ static void kvmppc_pte_free(pte_t *ptep)
static pmd_t *kvmppc_pmd_alloc(void)
{
- return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ pmd_t *pmd;
+
+ pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ /* pud_populate() will only reference _pa(pmd). */
+ kmemleak_ignore(pmd);
+
+ return pmd;
}
static void kvmppc_pmd_free(pmd_t *pmdp)
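Both allocation helpers above follow the same pattern: once the page-table page is linked into the radix tree, the only long-lived reference to it is its physical address, which kmemleak cannot scan, so kmemleak would otherwise flag the object as leaked. A minimal sketch of that pattern, outside the diff, using a hypothetical cache name in place of kvm_pte_cache/kvm_pmd_cache and assuming the usual <linux/kmemleak.h> API:

#include <linux/slab.h>
#include <linux/kmemleak.h>

/* hypothetical cache, stands in for kvm_pte_cache / kvm_pmd_cache */
static struct kmem_cache *table_cache;

static void *table_alloc(void)
{
	void *p = kmem_cache_alloc(table_cache, GFP_KERNEL);

	if (!p)
		return NULL;
	/*
	 * The only reference kept will be __pa(p) stored in the parent
	 * table entry, which kmemleak cannot see, so tell kmemleak not
	 * to report this object as a leak.
	 */
	kmemleak_ignore(p);
	return p;
}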
@@ -417,9 +429,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
* Callers are responsible for flushing the PWC.
*
* When page tables are being unmapped/freed as part of page fault path
- * (full == false), ptes are not expected. There is code to unmap them
- * and emit a warning if encountered, but there may already be data
- * corruption due to the unexpected mappings.
+ * (full == false), valid ptes are generally not expected; however, there
+ * is one situation where they arise, which is when dirty page logging is
+ * turned off for a memslot while the VM is running. The new memslot
+ * becomes visible to page faults before the memslot commit function
+ * gets to flush the memslot, which can lead to a 2MB page mapping being
+ * installed for a guest physical address where there are already 64kB
+ * (or 4kB) mappings (of sub-pages of the same 2MB page).
*/
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
unsigned int lpid)
@@ -433,7 +449,6 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
if (pte_val(*p) == 0)
continue;
- WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, p,
pte_pfn(*p) << PAGE_SHIFT,
PAGE_SHIFT, NULL, lpid);
@@ -891,7 +906,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
return ret;
}
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
@@ -937,7 +952,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
return RESUME_GUEST;
}
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
@@ -1142,6 +1157,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
kvm->arch.lpid);
gpa += PAGE_SIZE;
}
+ /*
+ * Increase the mmu notifier sequence number to prevent any page
+ * fault that read the memslot earlier from writing a PTE.
+ */
+ kvm->mmu_notifier_seq++;
spin_unlock(&kvm->mmu_lock);
}
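The sequence bump closes the race described in the comment earlier in the patch: a page fault that sampled the memslot before the flush will see the sequence change under mmu_lock and retry instead of installing a stale mapping. A rough, hedged sketch of the fault-side half of that handshake; the helper name follows the mmu_notifier_retry() convention of kernels from this era and the function below is illustrative only, not part of this patch:

/* Sketch only: how the fault path consumes kvm->mmu_notifier_seq. */
static int fault_side_sketch(struct kvm *kvm)
{
	unsigned long mmu_seq;

	mmu_seq = kvm->mmu_notifier_seq;	/* sampled before the memslot lookup */
	smp_rmb();

	/* ... resolve the host page for the faulting gpa ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* a flush or invalidate ran meanwhile: bail out and re-fault */
		spin_unlock(&kvm->mmu_lock);
		return RESUME_GUEST;
	}
	/* ... install the PTE while still holding mmu_lock ... */
	spin_unlock(&kvm->mmu_lock);
	return RESUME_GUEST;
}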