author    Paul Mackerras <paulus@ozlabs.org>  2018-08-14 20:37:45 +1000
committer Paul Mackerras <paulus@ozlabs.org>  2018-08-15 14:39:27 +1000
commit    c066fafc595eef5ae3c83ae3a8305956b8c3ef15 (patch)
tree      5b177b3a74faaad6f02a95190bae22b82f83c65a /arch/powerpc/kvm/book3s_64_mmu_radix.c
parent    KVM/x86: Use CC_SET()/CC_OUT in arch/x86/kvm/vmx.c (diff)
KVM: PPC: Book3S HV: Use correct pagesize in kvm_unmap_radix()
Since commit e641a317830b ("KVM: PPC: Book3S HV: Unify dirty page map between HPT and radix", 2017-10-26), kvm_unmap_radix() computes the number of PAGE_SIZEd pages being unmapped and passes it to kvmppc_update_dirty_map(), which expects to be passed the page size instead. Consequently it will only mark one system page dirty even when a large page (for example a THP page) is being unmapped. Part of the THP page might therefore not get copied during live migration, resulting in memory corruption for the guest.

This fixes it by computing and passing the page size in kvm_unmap_radix().

Cc: stable@vger.kernel.org # v4.15+
Fixes: e641a317830b ("KVM: PPC: Book3S HV: Unify dirty page map between HPT and radix")
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
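To make the unit mismatch concrete, here is a small standalone C sketch (not kernel code). The pages_to_dirty() helper, the 64K base page size and the 2MB large-page shift are illustrative assumptions, chosen only to mimic a callee that, as described above, expects a size in bytes and rounds it up to a number of base pages:

#include <stdio.h>

#define PAGE_SHIFT      16                      /* 64K base pages (assumption) */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* Expects @psize in BYTES; rounds up to a count of base pages to dirty. */
static unsigned long pages_to_dirty(unsigned long psize)
{
        return (psize + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
        unsigned int shift = 21;                /* 2MB large page (assumption) */

        /* Old code: passed a page count where bytes were expected. */
        unsigned long npages = 1UL << (shift - PAGE_SHIFT);     /* 32 */
        /* New code: passes the mapping size in bytes. */
        unsigned long psize = 1UL << shift;                     /* 2MB */

        printf("old argument dirties %lu page(s)\n", pages_to_dirty(npages)); /* 1  */
        printf("new argument dirties %lu page(s)\n", pages_to_dirty(psize));  /* 32 */
        return 0;
}

With the old argument, the page count (32) is misread as a byte count smaller than one page, so only a single base page is marked dirty; with the byte size, all 32 base pages covered by the large mapping are dirtied, which is what live migration needs in order to copy the whole THP page.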
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_radix.c')
 arch/powerpc/kvm/book3s_64_mmu_radix.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 176f911ee983..7efc42538ccf 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -738,10 +738,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                                               gpa, shift);
                 kvmppc_radix_tlbie_page(kvm, gpa, shift);
                 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
-                        unsigned long npages = 1;
+                        unsigned long psize = PAGE_SIZE;
                         if (shift)
-                                npages = 1ul << (shift - PAGE_SHIFT);
-                        kvmppc_update_dirty_map(memslot, gfn, npages);
+                                psize = 1ul << shift;
+                        kvmppc_update_dirty_map(memslot, gfn, psize);
                 }
         }
         return 0;