From 32f6daad4651a748a58a3ab6da0611862175722f Mon Sep 17 00:00:00 2001
From: Alex Williamson
Date: Wed, 11 Apr 2012 09:51:49 -0600
Subject: KVM: unmap pages from the iommu when slots are removed

We've been adding new mappings, but not destroying old mappings.
This can lead to a page leak as pages are pinned using
get_user_pages, but only unpinned with put_page if they still exist
in the memslots list on vm shutdown.  A memslot that is destroyed
while an iommu domain is enabled for the guest will therefore result
in an elevated page reference count that is never cleared.

Additionally, without this fix, the iommu is only programmed with
the first translation for a gpa.  This can result in peer-to-peer
errors if a mapping is destroyed and replaced by a new mapping at
the same gpa as the iommu will still be pointing to the original,
pinned memory address.

Signed-off-by: Alex Williamson
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/iommu.c    | 7 ++++++-
 virt/kvm/kvm_main.c | 5 +++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index a457d2138f49..fec1723de9b4 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -310,6 +310,11 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 	}
 }
 
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
+}
+
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
 	int idx;
@@ -320,7 +325,7 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 	slots = kvm_memslots(kvm);
 
 	kvm_for_each_memslot(memslot, slots)
-		kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+		kvm_iommu_unmap_pages(kvm, memslot);
 
 	srcu_read_unlock(&kvm->srcu, idx);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 42b73930a6de..9739b533ca2e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -808,12 +808,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (r)
 		goto out_free;
 
-	/* map the pages in iommu page table */
+	/* map/unmap the pages in iommu page table */
 	if (npages) {
 		r = kvm_iommu_map_pages(kvm, &new);
 		if (r)
 			goto out_free;
-	}
+	} else
+		kvm_iommu_unmap_pages(kvm, &old);
 
 	r = -ENOMEM;
 	slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),

From 21a1416a1c945c5aeaeaf791b63c64926018eb77 Mon Sep 17 00:00:00 2001
From: Alex Williamson
Date: Tue, 17 Apr 2012 21:46:44 -0600
Subject: KVM: lock slots_lock around device assignment

As pointed out by Jason Baron, when assigning a device to a guest
we first set the iommu domain pointer, which enables mapping and
unmapping of memory slots to the iommu.  This leaves a window where
this path is enabled, but we haven't synchronized the iommu mappings
to the existing memory slots.  Thus a slot being removed at that
point could send us down unexpected code paths removing non-existent
pinnings and iommu mappings.  Take the slots_lock around creating
the iommu domain and initial mappings as well as around iommu
teardown to avoid this race.

Signed-off-by: Alex Williamson
Signed-off-by: Marcelo Tosatti
---
 virt/kvm/iommu.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index fec1723de9b4..e9fff9830bf0 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -240,9 +240,13 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 		return -ENODEV;
 	}
 
+	mutex_lock(&kvm->slots_lock);
+
 	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
-	if (!kvm->arch.iommu_domain)
-		return -ENOMEM;
+	if (!kvm->arch.iommu_domain) {
+		r = -ENOMEM;
+		goto out_unlock;
+	}
 
 	if (!allow_unsafe_assigned_interrupts &&
 	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
@@ -253,17 +257,16 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 		       " module option.\n", __func__);
 		iommu_domain_free(kvm->arch.iommu_domain);
 		kvm->arch.iommu_domain = NULL;
-		return -EPERM;
+		r = -EPERM;
+		goto out_unlock;
 	}
 
 	r = kvm_iommu_map_memslots(kvm);
 	if (r)
-		goto out_unmap;
-
-	return 0;
+		kvm_iommu_unmap_memslots(kvm);
 
-out_unmap:
-	kvm_iommu_unmap_memslots(kvm);
+out_unlock:
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 
@@ -340,7 +343,11 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
 	if (!domain)
 		return 0;
 
+	mutex_lock(&kvm->slots_lock);
 	kvm_iommu_unmap_memslots(kvm);
+	kvm->arch.iommu_domain = NULL;
+	mutex_unlock(&kvm->slots_lock);
+
 	iommu_domain_free(domain);
 	return 0;
 }
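
The locking pattern the second patch enforces can be hard to see through the
diff context alone, so here is a minimal, self-contained userspace sketch of
the idea.  It is not KVM code: every name in it (struct vm, assign_device,
deassign_device, remove_slot) is invented, and pthread mutexes and malloc()
stand in for slots_lock and iommu_domain_alloc().  It only illustrates the
ordering: the domain pointer is published and initial mappings are built
under the same lock that slot updates take, and teardown clears the pointer
under that lock before freeing the domain.

/*
 * Userspace sketch of the slots_lock pattern; all names are invented
 * for illustration and do not exist in the kernel sources.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vm {
	pthread_mutex_t slots_lock;	/* stand-in for kvm->slots_lock */
	void *iommu_domain;		/* stand-in for kvm->arch.iommu_domain */
};

/* Like kvm_iommu_map_guest(): publish the domain and build the initial
 * mappings under slots_lock, so a concurrent slot removal can never see
 * the domain pointer before the mappings exist. */
static int assign_device(struct vm *vm)
{
	int r = 0;

	pthread_mutex_lock(&vm->slots_lock);
	vm->iommu_domain = malloc(1);	/* stand-in for iommu_domain_alloc() */
	if (!vm->iommu_domain) {
		r = -1;
		goto out_unlock;
	}
	/* ... map all existing memory slots here ... */
out_unlock:
	pthread_mutex_unlock(&vm->slots_lock);
	return r;
}

/* Like kvm_iommu_unmap_guest(): unmap and clear the pointer under the
 * same lock, then free the domain once no one can reach it. */
static void deassign_device(struct vm *vm)
{
	void *domain = vm->iommu_domain;

	if (!domain)
		return;

	pthread_mutex_lock(&vm->slots_lock);
	/* ... unmap and unpin all slots here ... */
	vm->iommu_domain = NULL;
	pthread_mutex_unlock(&vm->slots_lock);

	free(domain);
}

/* Stand-in for the slot-removal path: it takes the same lock, so it
 * either sees no domain at all or a domain with fully built mappings. */
static void remove_slot(struct vm *vm)
{
	pthread_mutex_lock(&vm->slots_lock);
	if (vm->iommu_domain) {
		/* ... unmap and unpin this slot's pages ... */
	}
	pthread_mutex_unlock(&vm->slots_lock);
}

int main(void)
{
	struct vm vm = { .slots_lock = PTHREAD_MUTEX_INITIALIZER };

	if (assign_device(&vm) == 0)
		remove_slot(&vm);
	deassign_device(&vm);
	printf("done\n");
	return 0;
}

The invented remove_slot() plays the role of the slot-removal side: because
it serializes on the same lock as assignment and teardown, the window the
commit message describes cannot open in this sketch either.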