From 301e7ee1dec513e5aca12d01c819a1f762918d0a Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Mon, 22 Jul 2019 16:21:05 +0200
Subject: Revert "iommu/vt-d: Consolidate domain_init() to avoid duplication"

This reverts commit 123b2ffc376e1b3e9e015c75175b61e88a8b8518.

This commit reportedly caused boot failures on some systems and needs
to be reverted for now.

Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 123 +++++++++++++++++++++++++++++++-------------
 1 file changed, 87 insertions(+), 36 deletions(-)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac4172c02244..441781d12553 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1833,6 +1833,63 @@ static inline int guestwidth_to_adjustwidth(int gaw)
 	return agaw;
 }

+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+		       int guest_width)
+{
+	int adjust_width, agaw;
+	unsigned long sagaw;
+	int err;
+
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+
+	err = init_iova_flush_queue(&domain->iovad,
+				    iommu_flush_iova, iova_entry_free);
+	if (err)
+		return err;
+
+	domain_reserve_special_ranges(domain);
+
+	/* calculate AGAW */
+	if (guest_width > cap_mgaw(iommu->cap))
+		guest_width = cap_mgaw(iommu->cap);
+	domain->gaw = guest_width;
+	adjust_width = guestwidth_to_adjustwidth(guest_width);
+	agaw = width_to_agaw(adjust_width);
+	sagaw = cap_sagaw(iommu->cap);
+	if (!test_bit(agaw, &sagaw)) {
+		/* hardware doesn't support it, choose a bigger one */
+		pr_debug("Hardware doesn't support agaw %d\n", agaw);
+		agaw = find_next_bit(&sagaw, 5, agaw);
+		if (agaw >= 5)
+			return -ENODEV;
+	}
+	domain->agaw = agaw;
+
+	if (ecap_coherent(iommu->ecap))
+		domain->iommu_coherency = 1;
+	else
+		domain->iommu_coherency = 0;
+
+	if (ecap_sc_support(iommu->ecap))
+		domain->iommu_snooping = 1;
+	else
+		domain->iommu_snooping = 0;
+
+	if (intel_iommu_superpage)
+		domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+	else
+		domain->iommu_superpage = 0;
+
+	domain->nid = iommu->node;
+
+	/* always allocate the top pgd */
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+	if (!domain->pgd)
+		return -ENOMEM;
+	__iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
+	return 0;
+}
+
 static void domain_exit(struct dmar_domain *domain)
 {
 	struct page *freelist;
@@ -2513,31 +2570,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
 	return 0;
 }

-static int domain_init(struct dmar_domain *domain, int guest_width)
-{
-	int adjust_width;
-
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-	domain_reserve_special_ranges(domain);
-
-	/* calculate AGAW */
-	domain->gaw = guest_width;
-	adjust_width = guestwidth_to_adjustwidth(guest_width);
-	domain->agaw = width_to_agaw(adjust_width);
-
-	domain->iommu_coherency = 0;
-	domain->iommu_snooping = 0;
-	domain->iommu_superpage = 0;
-	domain->max_addr = 0;
-
-	/* always allocate the top pgd */
-	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-	if (!domain->pgd)
-		return -ENOMEM;
-	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
-	return 0;
-}
-
 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 {
 	struct device_domain_info *info;
@@ -2575,19 +2607,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
 	domain = alloc_domain(0);
 	if (!domain)
 		return NULL;
-
-	if (domain_init(domain, gaw)) {
+	if (domain_init(domain, iommu, gaw)) {
 		domain_exit(domain);
 		return NULL;
 	}

-	if (init_iova_flush_queue(&domain->iovad,
-				  iommu_flush_iova,
-				  iova_entry_free)) {
-		pr_warn("iova flush queue initialization failed\n");
-		intel_iommu_strict = 1;
-	}
-
 out:
 	return domain;
 }
@@ -2692,6 +2716,8 @@ static int domain_prepare_identity_map(struct device *dev,
 	return iommu_domain_identity_map(domain, start, end);
 }

+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
 static int __init si_domain_init(int hw)
 {
 	struct dmar_rmrr_unit *rmrr;
@@ -2702,7 +2728,7 @@ static int __init si_domain_init(int hw)
 	if (!si_domain)
 		return -EFAULT;

-	if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+	if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		domain_exit(si_domain);
 		return -EFAULT;
 	}
@@ -4829,6 +4855,31 @@ static void dmar_remove_one_dev_info(struct device *dev)
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }

+static int md_domain_init(struct dmar_domain *domain, int guest_width)
+{
+	int adjust_width;
+
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+	domain_reserve_special_ranges(domain);
+
+	/* calculate AGAW */
+	domain->gaw = guest_width;
+	adjust_width = guestwidth_to_adjustwidth(guest_width);
+	domain->agaw = width_to_agaw(adjust_width);
+
+	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
+	domain->iommu_superpage = 0;
+	domain->max_addr = 0;
+
+	/* always allocate the top pgd */
+	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+	if (!domain->pgd)
+		return -ENOMEM;
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+	return 0;
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
@@ -4843,7 +4894,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 		pr_err("Can't allocate dmar_domain\n");
 		return NULL;
 	}
-	if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
 		pr_err("Domain initialization failed\n");
 		domain_exit(dmar_domain);
 		return NULL;
--
cgit v1.2.3-59-g8ed1b

From 557529494d79f3f1fadd486dd18d2de0b19be4da Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Tue, 9 Jul 2019 13:22:45 +0800
Subject: iommu/vt-d: Avoid duplicated pci dma alias consideration

As we have abandoned the home-made lazy domain allocation and delegated
the DMA domain life cycle up to the default domain mechanism defined in
the generic iommu layer, we needn't consider pci alias anymore when
mapping/unmapping the context entries. Without this fix, we see kernel
NULL pointer dereference during pci device hot-plug test.

Cc: Ashok Raj
Cc: Jacob Pan
Cc: Kevin Tian
Fixes: fa954e6831789 ("iommu/vt-d: Delegate the dma domain to upper layer")
Signed-off-by: Lu Baolu
Reported-and-tested-by: Xu Pengfei
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 55 ++-------------------------------------------
 1 file changed, 2 insertions(+), 53 deletions(-)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 441781d12553..9b1d62d03370 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -339,8 +339,6 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
-				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
@@ -2105,26 +2103,9 @@ out_unlock:
 	return ret;
 }

-struct domain_context_mapping_data {
-	struct dmar_domain *domain;
-	struct intel_iommu *iommu;
-	struct pasid_table *table;
-};
-
-static int domain_context_mapping_cb(struct pci_dev *pdev,
-				     u16 alias, void *opaque)
-{
-	struct domain_context_mapping_data *data = opaque;
-
-	return domain_context_mapping_one(data->domain, data->iommu,
-					  data->table, PCI_BUS_NUM(alias),
-					  alias & 0xff);
-}
-
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
-	struct domain_context_mapping_data data;
 	struct pasid_table *table;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
@@ -2134,17 +2115,7 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 		return -ENODEV;

 	table = intel_pasid_get_table(dev);
-
-	if (!dev_is_pci(dev))
-		return domain_context_mapping_one(domain, iommu, table,
-						  bus, devfn);
-
-	data.domain = domain;
-	data.iommu = iommu;
-	data.table = table;
-
-	return pci_for_each_dma_alias(to_pci_dev(dev),
-				      &domain_context_mapping_cb, &data);
+	return domain_context_mapping_one(domain, iommu, table, bus, devfn);
 }

 static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -4784,28 +4755,6 @@ out_free_dmar:
 	return ret;
 }

-static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
-{
-	struct intel_iommu *iommu = opaque;
-
-	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
-	return 0;
-}
-
-/*
- * NB - intel-iommu lacks any sort of reference counting for the users of
- * dependent devices. If multiple endpoints have intersecting dependent
- * devices, unbinding the driver from any one of them will possibly leave
- * the others unable to operate.
- */
-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
-{
-	if (!iommu || !dev || !dev_is_pci(dev))
-		return;
-
-	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
-}
-
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
 	struct dmar_domain *domain;
@@ -4826,7 +4775,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 					PASID_RID2PASID);

 		iommu_disable_dev_iotlb(info);
-		domain_context_clear(iommu, info->dev);
+		domain_context_clear_one(iommu, info->bus, info->devfn);
 		intel_pasid_free_table(info->dev);
 	}
--
cgit v1.2.3-59-g8ed1b

From effa467870c7612012885df4e246bdb8ffd8e44c Mon Sep 17 00:00:00 2001
From: Dmitry Safonov
Date: Tue, 16 Jul 2019 22:38:05 +0100
Subject: iommu/vt-d: Don't queue_iova() if there is no flush queue

Intel VT-d driver was reworked to use common deferred flushing
implementation. Previously there was one global per-cpu flush queue,
afterwards - one per domain.

Before deferring a flush, the queue should be allocated and initialized.

Currently only domains with IOMMU_DOMAIN_DMA type initialize their flush
queue. It's probably worth to init it for static or unmanaged domains
too, but it may be arguable - I'm leaving it to iommu folks.

Prevent queuing an iova flush if the domain doesn't have a queue.
The defensive check seems to be worth to keep even if queue would be
initialized for all kinds of domains. And is easy backportable.

On 4.19.43 stable kernel it has a user-visible effect: previously for
devices in si domain there were crashes, on sata devices:

 BUG: spinlock bad magic on CPU#6, swapper/0/1
  lock: 0xffff88844f582008, .magic: 00000000, .owner: /-1, .owner_cpu: 0
 CPU: 6 PID: 1 Comm: swapper/0 Not tainted 4.19.43 #1
 Call Trace:
  dump_stack+0x61/0x7e
  spin_bug+0x9d/0xa3
  do_raw_spin_lock+0x22/0x8e
  _raw_spin_lock_irqsave+0x32/0x3a
  queue_iova+0x45/0x115
  intel_unmap+0x107/0x113
  intel_unmap_sg+0x6b/0x76
  __ata_qc_complete+0x7f/0x103
  ata_qc_complete+0x9b/0x26a
  ata_qc_complete_multiple+0xd0/0xe3
  ahci_handle_port_interrupt+0x3ee/0x48a
  ahci_handle_port_intr+0x73/0xa9
  ahci_single_level_irq_intr+0x40/0x60
  __handle_irq_event_percpu+0x7f/0x19a
  handle_irq_event_percpu+0x32/0x72
  handle_irq_event+0x38/0x56
  handle_edge_irq+0x102/0x121
  handle_irq+0x147/0x15c
  do_IRQ+0x66/0xf2
  common_interrupt+0xf/0xf
 RIP: 0010:__do_softirq+0x8c/0x2df

The same for usb devices that use ehci-pci:

 BUG: spinlock bad magic on CPU#0, swapper/0/1
  lock: 0xffff88844f402008, .magic: 00000000, .owner: /-1, .owner_cpu: 0
 CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.19.43 #4
 Call Trace:
  dump_stack+0x61/0x7e
  spin_bug+0x9d/0xa3
  do_raw_spin_lock+0x22/0x8e
  _raw_spin_lock_irqsave+0x32/0x3a
  queue_iova+0x77/0x145
  intel_unmap+0x107/0x113
  intel_unmap_page+0xe/0x10
  usb_hcd_unmap_urb_setup_for_dma+0x53/0x9d
  usb_hcd_unmap_urb_for_dma+0x17/0x100
  unmap_urb_for_dma+0x22/0x24
  __usb_hcd_giveback_urb+0x51/0xc3
  usb_giveback_urb_bh+0x97/0xde
  tasklet_action_common.isra.4+0x5f/0xa1
  tasklet_action+0x2d/0x30
  __do_softirq+0x138/0x2df
  irq_exit+0x7d/0x8b
  smp_apic_timer_interrupt+0x10f/0x151
  apic_timer_interrupt+0xf/0x20
 RIP: 0010:_raw_spin_unlock_irqrestore+0x17/0x39

Cc: David Woodhouse
Cc: Joerg Roedel
Cc: Lu Baolu
Cc: iommu@lists.linux-foundation.org
Cc: # 4.14+
Fixes: 13cf01744608 ("iommu/vt-d: Make use of iova deferred flushing")
Signed-off-by: Dmitry Safonov
Reviewed-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c |  3 ++-
 drivers/iommu/iova.c        | 18 ++++++++++++++----
 include/linux/iova.h        |  6 ++++++
 3 files changed, 22 insertions(+), 5 deletions(-)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 9b1d62d03370..72c6d647bec9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3561,7 +3561,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)

 	freelist = domain_unmap(domain, start_pfn, last_pfn);

-	if (intel_iommu_strict || (pdev && pdev->untrusted)) {
+	if (intel_iommu_strict || (pdev && pdev->untrusted) ||
+	    !has_iova_flush_queue(&domain->iovad)) {
 		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      nrpages, !freelist, 0);
 		/* free iova */
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d499b2621239..8413ae54904a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);

+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+	return !!iovad->fq;
+}
+
 static void free_iova_flush_queue(struct iova_domain *iovad)
 {
-	if (!iovad->fq)
+	if (!has_iova_flush_queue(iovad))
 		return;

 	if (timer_pending(&iovad->fq_timer))
@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 int init_iova_flush_queue(struct iova_domain *iovad,
 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
 {
+	struct iova_fq __percpu *queue;
 	int cpu;

 	atomic64_set(&iovad->fq_flush_start_cnt, 0);
 	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

-	iovad->fq = alloc_percpu(struct iova_fq);
-	if (!iovad->fq)
+	queue = alloc_percpu(struct iova_fq);
+	if (!queue)
 		return -ENOMEM;

 	iovad->flush_cb = flush_cb;
@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq;

-		fq = per_cpu_ptr(iovad->fq, cpu);
+		fq = per_cpu_ptr(queue, cpu);
 		fq->head = 0;
 		fq->tail = 0;

 		spin_lock_init(&fq->lock);
 	}

+	smp_wmb();
+
+	iovad->fq = queue;
+
 	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
 	atomic_set(&iovad->fq_timer_on, 0);
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 781b96ac706f..cd0f1de901a8 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn);
+bool has_iova_flush_queue(struct iova_domain *iovad);
 int init_iova_flush_queue(struct iova_domain *iovad,
 			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
 {
 }

+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+	return false;
+}
+
 static inline int init_iova_flush_queue(struct iova_domain *iovad,
 					iova_flush_cb flush_cb,
 					iova_entry_dtor entry_dtor)
--
cgit v1.2.3-59-g8ed1b

From 3ee9eca760e7d0b68c55813243de66bbb499dc3b Mon Sep 17 00:00:00 2001
From: Dmitry Safonov
Date: Tue, 16 Jul 2019 22:38:06 +0100
Subject: iommu/vt-d: Check if domain->pgd was allocated

There is a couple of places where on domain_init() failure domain_exit()
is called. While currently domain_init() can fail only if
alloc_pgtable_page() has failed.

Make domain_exit() check if domain->pgd present, before calling
domain_unmap(), as it theoretically should crash on clearing pte entries
in dma_pte_clear_level().

Cc: David Woodhouse
Cc: Joerg Roedel
Cc: Lu Baolu
Cc: iommu@lists.linux-foundation.org
Signed-off-by: Dmitry Safonov
Reviewed-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 72c6d647bec9..bdaed2da8a55 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1890,7 +1890,6 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,

 static void domain_exit(struct dmar_domain *domain)
 {
-	struct page *freelist;

 	/* Remove associated devices and clear attached or cached domains */
 	domain_remove_dev_info(domain);
@@ -1898,9 +1897,12 @@ static void domain_exit(struct dmar_domain *domain)
 	/* destroy iovas */
 	put_iova_domain(&domain->iovad);

-	freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+	if (domain->pgd) {
+		struct page *freelist;

-	dma_free_pagelist(freelist);
+		freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+		dma_free_pagelist(freelist);
+	}

 	free_domain_mem(domain);
 }
--
cgit v1.2.3-59-g8ed1b

From 458b7c8e0dde12d140e3472b80919cbb9ae793f4 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Thu, 1 Aug 2019 11:14:58 +0800
Subject: iommu/vt-d: Detach domain when move device out of group

When removing a device from an iommu group, the domain should
be detached from the device. Otherwise, the stale domain info
will still be cached by the driver and the driver will refuse
to attach any domain to the device again.

Cc: Ashok Raj
Cc: Jacob Pan
Cc: Kevin Tian
Fixes: b7297783c2bb6 ("iommu/vt-d: Remove duplicated code for device hotplug")
Reported-and-tested-by: Vlad Buslov
Suggested-by: Robin Murphy
Link: https://lkml.org/lkml/2019/7/26/1133
Signed-off-by: Lu Baolu
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bdaed2da8a55..3e22fa6ae8c8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5316,6 +5316,8 @@ static void intel_iommu_remove_device(struct device *dev)
 	if (!iommu)
 		return;

+	dmar_remove_one_dev_info(dev);
+
 	iommu_group_remove_device(dev);

 	iommu_device_unlink(&iommu->iommu, dev);
--
cgit v1.2.3-59-g8ed1b

From ae23bfb68f2896835e54a137688906713cb607e7 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Tue, 6 Aug 2019 08:14:08 +0800
Subject: iommu/vt-d: Detach domain before using a private one

When the default domain of a group doesn't work for a device,
the iommu driver will try to use a private domain. The domain
which was previously attached to the device must be detached.

Cc: Ashok Raj
Cc: Jacob Pan
Cc: Kevin Tian
Cc: Alex Williamson
Fixes: 942067f1b6b97 ("iommu/vt-d: Identify default domains replaced with private")
Reported-by: Alex Williamson
Link: https://lkml.org/lkml/2019/8/2/1379
Signed-off-by: Lu Baolu
Tested-by: Alex Williamson
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 3e22fa6ae8c8..37259b7f95a7 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
 				dmar_domain = to_dmar_domain(domain);
 				dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 			}
+			dmar_remove_one_dev_info(dev);
 			get_private_domain_for_dev(dev);
 		}

@@ -4803,7 +4804,8 @@ static void dmar_remove_one_dev_info(struct device *dev)

 	spin_lock_irqsave(&device_domain_lock, flags);
 	info = dev->archdata.iommu;
-	__dmar_remove_one_dev_info(info);
+	if (info)
+		__dmar_remove_one_dev_info(info);
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }

@@ -5281,6 +5283,7 @@ static int intel_iommu_add_device(struct device *dev)
 		if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
 			ret = iommu_request_dm_for_dev(dev);
 			if (ret) {
+				dmar_remove_one_dev_info(dev);
 				dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 				domain_add_dev_info(si_domain, dev);
 				dev_info(dev,
@@ -5291,6 +5294,7 @@ static int intel_iommu_add_device(struct device *dev)
 		if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
 			ret = iommu_request_dma_domain_for_dev(dev);
 			if (ret) {
+				dmar_remove_one_dev_info(dev);
 				dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
 				if (!get_private_domain_for_dev(dev)) {
 					dev_warn(dev,
--
cgit v1.2.3-59-g8ed1b

From 3a18844dcf89e636b2d0cbf577e3963b0bcb6d23 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Tue, 6 Aug 2019 08:14:09 +0800
Subject: iommu/vt-d: Fix possible use-after-free of private domain

Multiple devices might share a private domain. One real example
is a pci bridge and all devices behind it. When remove a private
domain, make sure that it has been detached from all devices to
avoid use-after-free case.

Cc: Ashok Raj
Cc: Jacob Pan
Cc: Kevin Tian
Cc: Alex Williamson
Fixes: 942067f1b6b97 ("iommu/vt-d: Identify default domains replaced with private")
Signed-off-by: Lu Baolu
Tested-by: Alex Williamson
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 37259b7f95a7..12d094d08c0a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4791,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)

 	/* free the private domain */
 	if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
-	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+	    !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+	    list_empty(&domain->devices))
 		domain_exit(info->domain);

 	free_devinfo_mem(info);
--
cgit v1.2.3-59-g8ed1b
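The flush-queue fix above ("iommu/vt-d: Don't queue_iova() if there is no flush queue") rests on two points: intel_unmap() must fall back to a synchronous iommu_flush_iotlb_psi() whenever the per-domain flush queue does not exist, and init_iova_flush_queue() must fully initialize the per-CPU queue before publishing the iovad->fq pointer, with smp_wmb() ordering the writes so a reader that sees a non-NULL pointer never sees a half-initialized queue. The standalone C sketch below models that pattern with C11 atomics. It is illustrative only: the type and function names are invented, and a release/acquire pair stands in for the kernel's smp_wmb() plus plain pointer assignment.

/* flush_queue_sketch.c - illustrative model of the pattern, not kernel code */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct flush_queue {
	unsigned int head;
	unsigned int tail;
};

struct iova_dom {
	/* stays NULL until the queue is fully initialized and published */
	_Atomic(struct flush_queue *) fq;
};

/* analogue of has_iova_flush_queue(): is deferred flushing available? */
static bool has_flush_queue(struct iova_dom *d)
{
	return atomic_load_explicit(&d->fq, memory_order_acquire) != NULL;
}

/*
 * analogue of init_iova_flush_queue(): initialize every field first, then
 * publish the pointer; the release store plays the role of the smp_wmb()
 * followed by "iovad->fq = queue" in the patch above.
 */
static int init_flush_queue(struct iova_dom *d)
{
	struct flush_queue *q = calloc(1, sizeof(*q));

	if (!q)
		return -1;
	q->head = 0;
	q->tail = 0;
	atomic_store_explicit(&d->fq, q, memory_order_release);
	return 0;
}

/* analogue of the intel_unmap() branch: defer only when a queue exists */
static void unmap_one(struct iova_dom *d, bool strict)
{
	if (strict || !has_flush_queue(d))
		puts("no queue (or strict mode): flush and free the IOVA now");
	else
		puts("queue present: defer the flush");
}

int main(void)
{
	struct iova_dom d = { .fq = NULL };

	unmap_one(&d, false);		/* before init: synchronous path */
	if (init_flush_queue(&d) == 0)
		unmap_one(&d, false);	/* after init: deferred path */

	free(atomic_load_explicit(&d.fq, memory_order_acquire));
	return 0;
}

In the kernel itself, a domain that never calls init_iova_flush_queue() (such as the si_domain in the crash reports above) simply keeps taking the synchronous branch, which is exactly what the added has_iova_flush_queue() check guarantees.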