From 98b64741d61124a12fb05a7595acb1fd6c1dc55d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 7 Feb 2022 23:50:48 +0100 Subject: iommu/arm-smmu-v3: Avoid open coded arithmetic in memory allocation kmalloc_array()/kcalloc() should be used to avoid potential overflow when a multiplication is needed to compute the size of the requested memory. So turn a devm_kzalloc()+explicit size computation into an equivalent devm_kcalloc(). Signed-off-by: Christophe JAILLET Acked-by: Robin Murphy Link: https://lore.kernel.org/r/3f7b9b202c6b6f5edc234ab7af5f208fbf8bc944.1644274051.git.christophe.jaillet@wanadoo.fr Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 6dc6d8b6b368..14d06aad0726 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2981,10 +2981,10 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu) { unsigned int i; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; - size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents; void *strtab = smmu->strtab_cfg.strtab; - cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL); + cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents, + sizeof(*cfg->l1_desc), GFP_KERNEL); if (!cfg->l1_desc) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From fcdeb8c34043a192c881b0594b26d195d5e57997 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 7 Feb 2022 23:47:55 +0100 Subject: iommu/arm-smmu-v3: Simplify memory allocation Use devm_bitmap_zalloc() instead of hand writing it. Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/598bd905103dcbe5653a54bb0dfb5a8597728214.1644274051.git.christophe.jaillet@wanadoo.fr Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 14d06aad0726..bbc4eeb42811 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2911,32 +2911,20 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, return 0; } -static void arm_smmu_cmdq_free_bitmap(void *data) -{ - unsigned long *bitmap = data; - bitmap_free(bitmap); -} - static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu) { - int ret = 0; struct arm_smmu_cmdq *cmdq = &smmu->cmdq; unsigned int nents = 1 << cmdq->q.llq.max_n_shift; - atomic_long_t *bitmap; atomic_set(&cmdq->owner_prod, 0); atomic_set(&cmdq->lock, 0); - bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL); - if (!bitmap) { - dev_err(smmu->dev, "failed to allocate cmdq bitmap\n"); - ret = -ENOMEM; - } else { - cmdq->valid_map = bitmap; - devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap); - } + cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents, + GFP_KERNEL); + if (!cmdq->valid_map) + return -ENOMEM; - return ret; + return 0; } static int arm_smmu_init_queues(struct arm_smmu_device *smmu) -- cgit v1.2.3-59-g8ed1b From 93665e0275a2e2badfcfca5ada3b78b22df4a01a Mon Sep 17 00:00:00 2001 From: Miaoqian Lin Date: Wed, 5 Jan 2022 10:16:19 +0000 Subject: iommu/arm-smmu: Add missing pm_runtime_disable() in qcom_iommu_device_probe If the probe fails, we should use pm_runtime_disable() to balance pm_runtime_enable(). 
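The balanced shape of such a probe path looks roughly like this (a minimal sketch with hypothetical names; foo_hw_init() stands in for any step that can fail after pm_runtime_enable()):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_hw_init(struct device *dev) { return 0; }	/* hypothetical */

    static int foo_probe(struct platform_device *pdev)
    {
    	int ret;

    	pm_runtime_enable(&pdev->dev);

    	ret = foo_hw_init(&pdev->dev);	/* any fallible step */
    	if (ret)
    		goto err_pm_disable;	/* undo pm_runtime_enable() */

    	return 0;

    err_pm_disable:
    	pm_runtime_disable(&pdev->dev);
    	return ret;
    }
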
Add missing pm_runtime_disable() for error handling. Signed-off-by: Miaoqian Lin Link: https://lore.kernel.org/r/20220105101619.29108-1-linmq006@gmail.com Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index b91874cb6cf3..2f227bc88bd9 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -827,20 +827,20 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) ret = devm_of_platform_populate(dev); if (ret) { dev_err(dev, "Failed to populate iommu contexts\n"); - return ret; + goto err_pm_disable; } ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL, dev_name(dev)); if (ret) { dev_err(dev, "Failed to register iommu in sysfs\n"); - return ret; + goto err_pm_disable; } ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev); if (ret) { dev_err(dev, "Failed to register iommu\n"); - return ret; + goto err_pm_disable; } bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); @@ -852,6 +852,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) } return 0; + +err_pm_disable: + pm_runtime_disable(dev); + return ret; } static int qcom_iommu_device_remove(struct platform_device *pdev) -- cgit v1.2.3-59-g8ed1b From 30de2b541af98179780054836b48825fcfba4408 Mon Sep 17 00:00:00 2001 From: Zhou Guanghui Date: Wed, 19 Jan 2022 07:07:54 +0000 Subject: iommu/arm-smmu-v3: fix event handling soft lockup During event processing, events are read from the event queue one by one until the queue is empty. If the master device continuously requests address access at the same time and the SMMU generates events, the cyclic processing of the events takes a long time and softlockup warnings may be reported. arm-smmu-v3 arm-smmu-v3.34.auto: event 0x0a received: arm-smmu-v3 arm-smmu-v3.34.auto: 0x00007f220000280a arm-smmu-v3 arm-smmu-v3.34.auto: 0x000010000000007e arm-smmu-v3 arm-smmu-v3.34.auto: 0x00000000034e8670 watchdog: BUG: soft lockup - CPU#0 stuck for 22s! [irq/268-arm-smm:247] Call trace: _dev_info+0x7c/0xa0 arm_smmu_evtq_thread+0x1c0/0x230 irq_thread_fn+0x30/0x80 irq_thread+0x128/0x210 kthread+0x134/0x138 ret_from_fork+0x10/0x1c Kernel panic - not syncing: softlockup: hung tasks Fix this by calling cond_resched() after the event information is printed. Signed-off-by: Zhou Guanghui Link: https://lore.kernel.org/r/20220119070754.26528-1-zhouguanghui1@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index bbc4eeb42811..e92974173861 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1558,6 +1558,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) dev_info(smmu->dev, "\t0x%016llx\n", (unsigned long long)evt[i]); + cond_resched(); } /* -- cgit v1.2.3-59-g8ed1b From f266c11bce79ea793a741e3da6dfe4d6007df8df Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 1 Feb 2022 19:11:40 +0100 Subject: iommu/vtd: Replace acpi_bus_get_device() Replace acpi_bus_get_device(), which is going to be dropped, with acpi_fetch_acpi_dev(). No intentional functional impact. Signed-off-by: Rafael J. 
Wysocki Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/1807113.tdWV9SEqCh@kreacher Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 915bff76fe96..6d10be50ec30 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -789,7 +789,8 @@ static int __init dmar_acpi_dev_scope_init(void) andd->device_name); continue; } - if (acpi_bus_get_device(h, &adev)) { + adev = acpi_fetch_acpi_dev(h); + if (!adev) { pr_err("Failed to get device for ACPI object %s\n", andd->device_name); continue; -- cgit v1.2.3-59-g8ed1b From 1fdbbfd5099f797a4dac05e7ef0192ba4a9c39b4 Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Thu, 6 Jan 2022 10:43:02 +0800 Subject: iommu/ipmmu-vmsa: Check for error num after setting mask Because dma_supported() can fail, dma_set_mask_and_coherent() may return an error. Therefore, check the return value and propagate the error if the call fails. Fixes: 1c894225bf5b ("iommu/ipmmu-vmsa: IPMMU device is 40-bit bus master") Signed-off-by: Jiasheng Jiang Reviewed-by: Nikita Yushchenko Link: https://lore.kernel.org/r/20220106024302.2574180-1-jiasheng@iscas.ac.cn Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ca752bdc710f..61bd9a3004ed 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1006,7 +1006,9 @@ static int ipmmu_probe(struct platform_device *pdev) bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); mmu->features = of_device_get_match_data(&pdev->dev); memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; /* Map I/O memory and request IRQ. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- cgit v1.2.3-59-g8ed1b From da9f8386d6b7f2bb01098913c64b2b246f39cfce Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 8 Feb 2022 09:20:29 +0900 Subject: dt-bindings: iommu: renesas,ipmmu-vmsa: add r8a779f0 support Document the compatible values for the IPMMU-VMSA blocks in the Renesas R-Car S4-8 (R8A779F0) SoC and R-Car Gen4. Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Acked-by: Rob Herring Link: https://lore.kernel.org/r/20220208002030.1319984-2-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Joerg Roedel --- Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml index ce0c715205c6..5159a87f3fa7 100644 --- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml +++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.yaml @@ -44,6 +44,10 @@ properties: - renesas,ipmmu-r8a77990 # R-Car E3 - renesas,ipmmu-r8a77995 # R-Car D3 - renesas,ipmmu-r8a779a0 # R-Car V3U + - items: + - enum: + - renesas,ipmmu-r8a779f0 # R-Car S4-8 + - const: renesas,rcar-gen4-ipmmu-vmsa # R-Car Gen4 reg: maxItems: 1 -- cgit v1.2.3-59-g8ed1b From ae684caf465b7d6a6bd917049ee3749ae4b19435 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 8 Feb 2022 09:20:30 +0900 Subject: iommu/ipmmu-vmsa: Add support for R-Car Gen4 Add support for R-Car Gen4 like r8a779f0 (R-Car S4-8). 
The IPMMU hardware design of r8a779f0 is the same as r8a779a0. So, rename "r8a779a0" to "rcar_gen4". Signed-off-by: Yoshihiro Shimoda Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20220208002030.1319984-3-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 61bd9a3004ed..4c3217b1a870 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -719,6 +719,7 @@ static int ipmmu_init_platform_device(struct device *dev, static const struct soc_device_attribute soc_needs_opt_in[] = { { .family = "R-Car Gen3", }, + { .family = "R-Car Gen4", }, { .family = "RZ/G2", }, { /* sentinel */ } }; @@ -743,7 +744,7 @@ static bool ipmmu_device_is_allowed(struct device *dev) unsigned int i; /* - * R-Car Gen3 and RZ/G2 use the allow list to opt-in devices. + * R-Car Gen3/4 and RZ/G2 use the allow list to opt-in devices. * For Other SoCs, this returns true anyway. */ if (!soc_device_match(soc_needs_opt_in)) @@ -926,7 +927,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .utlb_offset_base = 0, }; -static const struct ipmmu_features ipmmu_features_r8a779a0 = { +static const struct ipmmu_features ipmmu_features_rcar_gen4 = { .use_ns_alias_offset = false, .has_cache_leaf_nodes = true, .number_of_contexts = 16, @@ -982,7 +983,10 @@ static const struct of_device_id ipmmu_of_ids[] = { .data = &ipmmu_features_rcar_gen3, }, { .compatible = "renesas,ipmmu-r8a779a0", - .data = &ipmmu_features_r8a779a0, + .data = &ipmmu_features_rcar_gen4, + }, { + .compatible = "renesas,rcar-gen4-ipmmu", + .data = &ipmmu_features_rcar_gen4, }, { /* Terminator */ }, -- cgit v1.2.3-59-g8ed1b From 114a6f5015df8eda0cf37302914cb15a3da3f086 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 10 Feb 2022 12:29:05 +0000 Subject: iommu: Remove trivial ops->capable implementations Implementing ops->capable to always return false is pointless since it's the default behaviour anyway. Clean up the unnecessary implementations. 
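For context, the core helper already treats a missing callback as "not capable"; roughly like the sketch below (a simplified rendering of the iommu_capable() logic of this era, not a verbatim copy):

    #include <linux/device.h>
    #include <linux/iommu.h>

    /* Sketch: a NULL ops->capable simply means "no" for every capability. */
    static bool example_iommu_capable(struct bus_type *bus, enum iommu_cap cap)
    {
    	if (!bus->iommu_ops || !bus->iommu_ops->capable)
    		return false;	/* default behaviour the drivers relied on */

    	return bus->iommu_ops->capable(cap);
    }
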
Signed-off-by: Robin Murphy Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/8413578c6f8a7cf75530b00cba8f10f5b88f8517.1644495614.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- drivers/iommu/msm_iommu.c | 6 ------ drivers/iommu/tegra-gart.c | 6 ------ drivers/iommu/tegra-smmu.c | 6 ------ 3 files changed, 18 deletions(-) diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 3a38352b603f..6a2e511edb85 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -558,11 +558,6 @@ fail: return ret; } -static bool msm_iommu_capable(enum iommu_cap cap) -{ - return false; -} - static void print_ctx_regs(void __iomem *base, int ctx) { unsigned int fsr = GET_FSR(base, ctx); @@ -672,7 +667,6 @@ fail: } static struct iommu_ops msm_iommu_ops = { - .capable = msm_iommu_capable, .domain_alloc = msm_iommu_domain_alloc, .domain_free = msm_iommu_domain_free, .attach_dev = msm_iommu_attach_dev, diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 6a358f92c7e5..bbd287d19324 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -238,11 +238,6 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, return pte & GART_PAGE_MASK; } -static bool gart_iommu_capable(enum iommu_cap cap) -{ - return false; -} - static struct iommu_device *gart_iommu_probe_device(struct device *dev) { if (!dev_iommu_fwspec_get(dev)) @@ -276,7 +271,6 @@ static void gart_iommu_sync(struct iommu_domain *domain, } static const struct iommu_ops gart_iommu_ops = { - .capable = gart_iommu_capable, .domain_alloc = gart_iommu_domain_alloc, .domain_free = gart_iommu_domain_free, .attach_dev = gart_iommu_attach_dev, diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index e900e3c46903..43df44f918a1 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -272,11 +272,6 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) clear_bit(id, smmu->asids); } -static bool tegra_smmu_capable(enum iommu_cap cap) -{ - return false; -} - static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type) { struct tegra_smmu_as *as; @@ -967,7 +962,6 @@ static int tegra_smmu_of_xlate(struct device *dev, } static const struct iommu_ops tegra_smmu_ops = { - .capable = tegra_smmu_capable, .domain_alloc = tegra_smmu_domain_alloc, .domain_free = tegra_smmu_domain_free, .attach_dev = tegra_smmu_attach_dev, -- cgit v1.2.3-59-g8ed1b From 6efd3b8356693898e87610e52bfecd81c70a419b Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 4 Feb 2022 20:16:41 +0000 Subject: iommu/rockchip: Use standard driver registration It's been a long time since there was any reason to register IOMMU drivers early. Convert to the standard platform driver helper. 
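For reference, builtin_platform_driver() boils down to the same registration boilerplate it replaces, just at device_initcall() level instead of subsys_initcall() (a simplified sketch of the macro expansion, modulo the exact plumbing):

    #include <linux/platform_device.h>

    /* Roughly what builtin_platform_driver(rk_iommu_driver) expands to: */
    static int __init rk_iommu_driver_init(void)
    {
    	return platform_driver_register(&rk_iommu_driver);
    }
    device_initcall(rk_iommu_driver_init);
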
CC: Heiko Stuebner Signed-off-by: Robin Murphy Reviewed-by: Heiko Stuebner Link: https://lore.kernel.org/r/c08d58bff340da6a829e76d66d2fa090a9718384.1644005728.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- drivers/iommu/rockchip-iommu.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 7f23ad61c094..204a93a72572 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1407,9 +1407,4 @@ static struct platform_driver rk_iommu_driver = { .suppress_bind_attrs = true, }, }; - -static int __init rk_iommu_init(void) -{ - return platform_driver_register(&rk_iommu_driver); -} -subsys_initcall(rk_iommu_init); +builtin_platform_driver(rk_iommu_driver); -- cgit v1.2.3-59-g8ed1b From 6b813e0e48d77bc026aaea6683fc6e74f5695ab4 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 4 Feb 2022 20:16:40 +0000 Subject: iommu/msm: Use standard driver registration It's been a long time since there was any reason to register IOMMU drivers early. Convert to the standard platform driver helper. CC: Andy Gross CC: Bjorn Andersson Signed-off-by: Robin Murphy Link: https://lore.kernel.org/all/05ca5e1b29bdd350f4e20b9ceb031a2c281e23d2.1644005728.git.robin.murphy@arm.com/ Signed-off-by: Joerg Roedel --- drivers/iommu/msm_iommu.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 3a38352b603f..06bde6b66732 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -831,16 +831,4 @@ static struct platform_driver msm_iommu_driver = { .probe = msm_iommu_probe, .remove = msm_iommu_remove, }; - -static int __init msm_iommu_driver_init(void) -{ - int ret; - - ret = platform_driver_register(&msm_iommu_driver); - if (ret != 0) - pr_err("Failed to register IOMMU driver\n"); - - return ret; -} -subsys_initcall(msm_iommu_driver_init); - +builtin_platform_driver(msm_iommu_driver); -- cgit v1.2.3-59-g8ed1b From a063158b20afea3cf0dfebea4a17c4e985fb6017 Mon Sep 17 00:00:00 2001 From: David Heidelberg Date: Sun, 6 Feb 2022 21:29:45 +0100 Subject: iommu/msm: Simplify with dev_err_probe() Use the dev_err_probe() helper to simplify error handling during probe. This also handles the scenario when -EPROBE_DEFER is returned and a useless error is printed. 
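The helper logs the failure (staying silent for -EPROBE_DEFER, which it records as the deferred-probe reason instead) and returns the error code, so the usual log-then-return pair collapses into one statement. A generic sketch, with a made-up "core" clock name:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int example_get_clk(struct device *dev, struct clk **clk)
    {
    	*clk = devm_clk_get(dev, "core");	/* hypothetical clock name */
    	if (IS_ERR(*clk))
    		return dev_err_probe(dev, PTR_ERR(*clk),
    				     "could not get core clk\n");
    	return 0;
    }
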
Fixes warnings as: msm_iommu 7500000.iommu: could not get smmu_pclk Signed-off-by: David Heidelberg Link: https://lore.kernel.org/r/20220206202945.465195-1-david@ixit.cz Signed-off-by: Joerg Roedel --- drivers/iommu/msm_iommu.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 06bde6b66732..e98de16c1a42 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -710,36 +710,32 @@ static int msm_iommu_probe(struct platform_device *pdev) INIT_LIST_HEAD(&iommu->ctx_list); iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk"); - if (IS_ERR(iommu->pclk)) { - dev_err(iommu->dev, "could not get smmu_pclk\n"); - return PTR_ERR(iommu->pclk); - } + if (IS_ERR(iommu->pclk)) + return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk), + "could not get smmu_pclk\n"); ret = clk_prepare(iommu->pclk); - if (ret) { - dev_err(iommu->dev, "could not prepare smmu_pclk\n"); - return ret; - } + if (ret) + return dev_err_probe(iommu->dev, ret, + "could not prepare smmu_pclk\n"); iommu->clk = devm_clk_get(iommu->dev, "iommu_clk"); if (IS_ERR(iommu->clk)) { - dev_err(iommu->dev, "could not get iommu_clk\n"); clk_unprepare(iommu->pclk); - return PTR_ERR(iommu->clk); + return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk), + "could not get iommu_clk\n"); } ret = clk_prepare(iommu->clk); if (ret) { - dev_err(iommu->dev, "could not prepare iommu_clk\n"); clk_unprepare(iommu->pclk); - return ret; + return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n"); } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); iommu->base = devm_ioremap_resource(iommu->dev, r); if (IS_ERR(iommu->base)) { - dev_err(iommu->dev, "could not get iommu base\n"); - ret = PTR_ERR(iommu->base); + ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n"); goto fail; } ioaddr = r->start; -- cgit v1.2.3-59-g8ed1b From 32e92d9f6f876721cc4f565f8369d542b6266380 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 3 Feb 2022 17:59:20 +0800 Subject: iommu/iova: Separate out rcache init Currently the rcache structures are allocated for all IOVA domains, even if they do not use the "fast" alloc+free interface. This is wasteful of memory. In addition, failures in init_iova_rcaches() are not handled safely, which is less than ideal. Make "fast" users call a separate rcache init explicitly, which includes error checking. Signed-off-by: John Garry Reviewed-by: Robin Murphy Acked-by: Michael S. 
Tsirkin Link: https://lore.kernel.org/r/1643882360-241739-1-git-send-email-john.garry@huawei.com Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 4 ++ drivers/iommu/iova.c | 73 +++++++++++++++++++++++++++++------- drivers/vdpa/vdpa_user/iova_domain.c | 11 ++++++ include/linux/iova.h | 15 ++------ 4 files changed, 77 insertions(+), 26 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index d85d54f2b549..b22034975301 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; unsigned long order, base_pfn; struct iova_domain *iovad; + int ret; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } init_iova_domain(iovad, 1UL << order, base_pfn); + ret = iova_domain_init_rcaches(iovad); + if (ret) + return ret; /* If the FQ fails we can simply fall back to strict mode */ if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain)) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b28c9435b898..7e9c3a97c040 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -15,13 +15,14 @@ /* The anchor node sits above the top of the usable address space */ #define IOVA_ANCHOR ~0UL +#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ + static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, unsigned long size); static unsigned long iova_rcache_get(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn); -static void init_iova_rcaches(struct iova_domain *iovad); static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); static void free_iova_rcaches(struct iova_domain *iovad); @@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR; rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node); rb_insert_color(&iovad->anchor.node, &iovad->rbroot); - cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead); - init_iova_rcaches(iovad); } EXPORT_SYMBOL_GPL(init_iova_domain); @@ -488,6 +487,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) } EXPORT_SYMBOL_GPL(free_iova_fast); +static void iova_domain_free_rcaches(struct iova_domain *iovad) +{ + cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, + &iovad->cpuhp_dead); + free_iova_rcaches(iovad); +} + /** * put_iova_domain - destroys the iova domain * @iovad: - iova domain in question. 
@@ -497,9 +503,9 @@ void put_iova_domain(struct iova_domain *iovad) { struct iova *iova, *tmp; - cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, - &iovad->cpuhp_dead); - free_iova_rcaches(iovad); + if (iovad->rcaches) + iova_domain_free_rcaches(iovad); + rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) free_iova_mem(iova); } @@ -608,6 +614,7 @@ EXPORT_SYMBOL_GPL(reserve_iova); */ #define IOVA_MAG_SIZE 128 +#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ struct iova_magazine { unsigned long size; @@ -620,6 +627,13 @@ struct iova_cpu_rcache { struct iova_magazine *prev; }; +struct iova_rcache { + spinlock_t lock; + unsigned long depot_size; + struct iova_magazine *depot[MAX_GLOBAL_MAGS]; + struct iova_cpu_rcache __percpu *cpu_rcaches; +}; + static struct iova_magazine *iova_magazine_alloc(gfp_t flags) { return kzalloc(sizeof(struct iova_magazine), flags); @@ -693,28 +707,54 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn) mag->pfns[mag->size++] = pfn; } -static void init_iova_rcaches(struct iova_domain *iovad) +int iova_domain_init_rcaches(struct iova_domain *iovad) { - struct iova_cpu_rcache *cpu_rcache; - struct iova_rcache *rcache; unsigned int cpu; - int i; + int i, ret; + + iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE, + sizeof(struct iova_rcache), + GFP_KERNEL); + if (!iovad->rcaches) + return -ENOMEM; for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { + struct iova_cpu_rcache *cpu_rcache; + struct iova_rcache *rcache; + rcache = &iovad->rcaches[i]; spin_lock_init(&rcache->lock); rcache->depot_size = 0; - rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size()); - if (WARN_ON(!rcache->cpu_rcaches)) - continue; + rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), + cache_line_size()); + if (!rcache->cpu_rcaches) { + ret = -ENOMEM; + goto out_err; + } for_each_possible_cpu(cpu) { cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); + spin_lock_init(&cpu_rcache->lock); cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL); cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL); + if (!cpu_rcache->loaded || !cpu_rcache->prev) { + ret = -ENOMEM; + goto out_err; + } } } + + ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, + &iovad->cpuhp_dead); + if (ret) + goto out_err; + return 0; + +out_err: + free_iova_rcaches(iovad); + return ret; } +EXPORT_SYMBOL_GPL(iova_domain_init_rcaches); /* * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and @@ -831,7 +871,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad, { unsigned int log_size = order_base_2(size); - if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE) + if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches) return 0; return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size); @@ -849,6 +889,8 @@ static void free_iova_rcaches(struct iova_domain *iovad) for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { rcache = &iovad->rcaches[i]; + if (!rcache->cpu_rcaches) + break; for_each_possible_cpu(cpu) { cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu); iova_magazine_free(cpu_rcache->loaded); @@ -858,6 +900,9 @@ static void free_iova_rcaches(struct iova_domain *iovad) for (j = 0; j < rcache->depot_size; ++j) iova_magazine_free(rcache->depot[j]); } + + kfree(iovad->rcaches); + iovad->rcaches = NULL; } /* diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c index 2b1143f11d8f..22f7d43f8a68 100644 --- a/drivers/vdpa/vdpa_user/iova_domain.c +++ 
b/drivers/vdpa/vdpa_user/iova_domain.c @@ -480,6 +480,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size) struct file *file; struct vduse_bounce_map *map; unsigned long pfn, bounce_pfns; + int ret; bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT; if (iova_limit <= bounce_size) @@ -513,10 +514,20 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size) spin_lock_init(&domain->iotlb_lock); init_iova_domain(&domain->stream_iovad, PAGE_SIZE, IOVA_START_PFN); + ret = iova_domain_init_rcaches(&domain->stream_iovad); + if (ret) + goto err_iovad_stream; init_iova_domain(&domain->consistent_iovad, PAGE_SIZE, bounce_pfns); + ret = iova_domain_init_rcaches(&domain->consistent_iovad); + if (ret) + goto err_iovad_consistent; return domain; +err_iovad_consistent: + put_iova_domain(&domain->stream_iovad); +err_iovad_stream: + fput(file); err_file: vfree(domain->bounce_maps); err_map: diff --git a/include/linux/iova.h b/include/linux/iova.h index cea79cb9f26c..320a70e40233 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -21,18 +21,8 @@ struct iova { unsigned long pfn_lo; /* Lowest allocated pfn */ }; -struct iova_magazine; -struct iova_cpu_rcache; -#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ -#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ -struct iova_rcache { - spinlock_t lock; - unsigned long depot_size; - struct iova_magazine *depot[MAX_GLOBAL_MAGS]; - struct iova_cpu_rcache __percpu *cpu_rcaches; -}; +struct iova_rcache; /* holds all the iova translations for a domain */ struct iova_domain { @@ -46,7 +36,7 @@ struct iova_domain { unsigned long max32_alloc_size; /* Size of last failed allocation */ struct iova anchor; /* rbtree lookup anchor */ - struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ + struct iova_rcache *rcaches; struct hlist_node cpuhp_dead; }; @@ -102,6 +92,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); +int iova_domain_init_rcaches(struct iova_domain *iovad); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); #else -- cgit v1.2.3-59-g8ed1b From 8ddf4eff71e12e4295b54ac8b02439ad22271fd2 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 9 Feb 2022 20:47:58 +0200 Subject: perf/smmuv3: Don't cast parameter in bit operations While in this particular case it would not be a (critical) issue, the pattern itself is bad and error prone in case somebody blindly copies it into their own code. Don't cast the parameter to an unsigned long pointer in the bit operations. Instead, copy it to a local variable of the proper type on the stack and use that. Note, new compilers might warn on the cast for potential out-of-bounds access. 
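In isolation, the safe pattern looks like this (a minimal sketch, not the driver's exact code):

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    static void example_walk_overflow_bits(u64 ovsr, unsigned int num_counters)
    {
    	DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64));
    	unsigned int idx;

    	/*
    	 * Copy the register value into a real bitmap instead of casting
    	 * &ovsr to unsigned long *, which has the wrong word order on
    	 * 32-bit big-endian systems.
    	 */
    	bitmap_from_u64(ovs, ovsr);

    	for_each_set_bit(idx, ovs, num_counters)
    		;	/* handle overflow of counter 'idx' here */
    }
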
Signed-off-by: Andy Shevchenko Reviewed-by: Robin Murphy Link: https://lore.kernel.org/r/20220209184758.56578-1-andriy.shevchenko@linux.intel.com Signed-off-by: Will Deacon --- drivers/perf/arm_smmuv3_pmu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index c49108a72865..00d4c45a8017 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -654,6 +654,7 @@ static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data) { struct smmu_pmu *smmu_pmu = data; + DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64)); u64 ovsr; unsigned int idx; @@ -663,7 +664,8 @@ static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data) writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); - for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) { + bitmap_from_u64(ovs, ovsr); + for_each_set_bit(idx, ovs, smmu_pmu->num_counters) { struct perf_event *event = smmu_pmu->events[idx]; struct hw_perf_event *hwc; -- cgit v1.2.3-59-g8ed1b From 989192ac6ad54acccd5dd1c058055dacd6b94b30 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:41 +0800 Subject: iommu/vt-d: Remove guest pasid related callbacks The guest pasid related callbacks are not called in the tree. Remove them to avoid dead code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-2-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 208 ------------------------------------------- drivers/iommu/intel/pasid.c | 161 ---------------------------------- drivers/iommu/intel/pasid.h | 4 - drivers/iommu/intel/svm.c | 209 -------------------------------------------- include/linux/intel-iommu.h | 10 --- include/linux/intel-svm.h | 12 --- 6 files changed, 604 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 92fea3fbbb11..4f9d95067c8f 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4825,13 +4825,6 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (!iommu) return -ENODEV; - if ((dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE) && - !ecap_nest(iommu->ecap)) { - dev_err(dev, "%s: iommu not support nested translation\n", - iommu->name); - return -EINVAL; - } - /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4919,185 +4912,6 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain, aux_domain_remove_dev(to_dmar_domain(domain), dev); } -#ifdef CONFIG_INTEL_IOMMU_SVM -/* - * 2D array for converting and sanitizing IOMMU generic TLB granularity to - * VT-d granularity. Invalidation is typically included in the unmap operation - * as a result of DMA or VFIO unmap. However, for assigned devices guest - * owns the first level page tables. Invalidations of translation caches in the - * guest are trapped and passed down to the host. - * - * vIOMMU in the guest will only expose first level page tables, therefore - * we do not support IOTLB granularity for request without PASID (second level). 
- * - * For example, to find the VT-d granularity encoding for IOTLB - * type and page selective granularity within PASID: - * X: indexed by iommu cache type - * Y: indexed by enum iommu_inv_granularity - * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR] - */ - -static const int -inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = { - /* - * PASID based IOTLB invalidation: PASID selective (per PASID), - * page selective (address granularity) - */ - {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID}, - /* PASID based dev TLBs */ - {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL}, - /* PASID cache */ - {-EINVAL, -EINVAL, -EINVAL} -}; - -static inline int to_vtd_granularity(int type, int granu) -{ - return inv_type_granu_table[type][granu]; -} - -static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules) -{ - u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT; - - /* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc. - * IOMMU cache invalidate API passes granu_size in bytes, and number of - * granu size in contiguous memory. - */ - return order_base_2(nr_pages); -} - -static int -intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - int cache_type; - u8 bus, devfn; - u16 did, sid; - int ret = 0; - u64 size = 0; - - if (!inv_info || !dmar_domain) - return -EINVAL; - - if (!dev || !dev_is_pci(dev)) - return -ENODEV; - - iommu = device_to_iommu(dev, &bus, &devfn); - if (!iommu) - return -ENODEV; - - if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE)) - return -EINVAL; - - spin_lock_irqsave(&device_domain_lock, flags); - spin_lock(&iommu->lock); - info = get_domain_info(dev); - if (!info) { - ret = -EINVAL; - goto out_unlock; - } - did = dmar_domain->iommu_did[iommu->seq_id]; - sid = PCI_DEVID(bus, devfn); - - /* Size is only valid in address selective invalidation */ - if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) - size = to_vtd_size(inv_info->granu.addr_info.granule_size, - inv_info->granu.addr_info.nb_granules); - - for_each_set_bit(cache_type, - (unsigned long *)&inv_info->cache, - IOMMU_CACHE_INV_TYPE_NR) { - int granu = 0; - u64 pasid = 0; - u64 addr = 0; - - granu = to_vtd_granularity(cache_type, inv_info->granularity); - if (granu == -EINVAL) { - pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n", - cache_type, inv_info->granularity); - break; - } - - /* - * PASID is stored in different locations based on the - * granularity. - */ - if (inv_info->granularity == IOMMU_INV_GRANU_PASID && - (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID)) - pasid = inv_info->granu.pasid_info.pasid; - else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR && - (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID)) - pasid = inv_info->granu.addr_info.pasid; - - switch (BIT(cache_type)) { - case IOMMU_CACHE_INV_TYPE_IOTLB: - /* HW will ignore LSB bits based on address mask */ - if (inv_info->granularity == IOMMU_INV_GRANU_ADDR && - size && - (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) { - pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n", - inv_info->granu.addr_info.addr, size); - } - - /* - * If granu is PASID-selective, address is ignored. - * We use npages = -1 to indicate that. 
- */ - qi_flush_piotlb(iommu, did, pasid, - mm_to_dma_pfn(inv_info->granu.addr_info.addr), - (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size, - inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF); - - if (!info->ats_enabled) - break; - /* - * Always flush device IOTLB if ATS is enabled. vIOMMU - * in the guest may assume IOTLB flush is inclusive, - * which is more efficient. - */ - fallthrough; - case IOMMU_CACHE_INV_TYPE_DEV_IOTLB: - /* - * PASID based device TLB invalidation does not support - * IOMMU_INV_GRANU_PASID granularity but only supports - * IOMMU_INV_GRANU_ADDR. - * The equivalent of that is we set the size to be the - * entire range of 64 bit. User only provides PASID info - * without address info. So we set addr to 0. - */ - if (inv_info->granularity == IOMMU_INV_GRANU_PASID) { - size = 64 - VTD_PAGE_SHIFT; - addr = 0; - } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) { - addr = inv_info->granu.addr_info.addr; - } - - if (info->ats_enabled) - qi_flush_dev_iotlb_pasid(iommu, sid, - info->pfsid, pasid, - info->ats_qdep, addr, - size); - else - pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n"); - break; - default: - dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n", - cache_type); - ret = -EINVAL; - } - } -out_unlock: - spin_unlock(&iommu->lock); - spin_unlock_irqrestore(&device_domain_lock, flags); - - return ret; -} -#endif - static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, size_t size, int iommu_prot, gfp_t gfp) @@ -5545,24 +5359,6 @@ static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, return attach_deferred(dev); } -static int -intel_iommu_enable_nesting(struct iommu_domain *domain) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - unsigned long flags; - int ret = -ENODEV; - - spin_lock_irqsave(&device_domain_lock, flags); - if (list_empty(&dmar_domain->devices)) { - dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; - dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; - ret = 0; - } - spin_unlock_irqrestore(&device_domain_lock, flags); - - return ret; -} - /* * Check that the device does not live on an external facing PCI port that is * marked as untrusted. 
Such devices should not be able to apply quirks and @@ -5599,7 +5395,6 @@ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, - .enable_nesting = intel_iommu_enable_nesting, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, .aux_attach_dev = intel_iommu_aux_attach_device, @@ -5624,9 +5419,6 @@ const struct iommu_ops intel_iommu_ops = { .def_domain_type = device_def_domain_type, .pgsize_bitmap = SZ_4K, #ifdef CONFIG_INTEL_IOMMU_SVM - .cache_invalidate = intel_iommu_sva_invalidate, - .sva_bind_gpasid = intel_svm_bind_gpasid, - .sva_unbind_gpasid = intel_svm_unbind_gpasid, .sva_bind = intel_svm_bind, .sva_unbind = intel_svm_unbind, .sva_get_pasid = intel_svm_get_pasid, diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 07c390aed1fe..10fb82ea467d 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -762,164 +762,3 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, return 0; } - -static int -intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte, - struct iommu_gpasid_bind_data_vtd *pasid_data) -{ - /* - * Not all guest PASID table entry fields are passed down during bind, - * here we only set up the ones that are dependent on guest settings. - * Execution related bits such as NXE, SMEP are not supported. - * Other fields, such as snoop related, are set based on host needs - * regardless of guest settings. - */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) { - if (!ecap_srs(iommu->ecap)) { - pr_err_ratelimited("No supervisor request support on %s\n", - iommu->name); - return -EINVAL; - } - pasid_set_sre(pte); - /* Enable write protect WP if guest requested */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE) - pasid_set_wpe(pte); - } - - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) { - if (!ecap_eafs(iommu->ecap)) { - pr_err_ratelimited("No extended access flag support on %s\n", - iommu->name); - return -EINVAL; - } - pasid_set_eafe(pte); - } - - /* - * Memory type is only applicable to devices inside processor coherent - * domain. Will add MTS support once coherent devices are available. - */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) { - pr_warn_ratelimited("No memory type support %s\n", - iommu->name); - return -EINVAL; - } - - return 0; -} - -/** - * intel_pasid_setup_nested() - Set up PASID entry for nested translation. - * This could be used for guest shared virtual address. In this case, the - * first level page tables are used for GVA-GPA translation in the guest, - * second level page tables are used for GPA-HPA translation. 
- * - * @iommu: IOMMU which the device belong to - * @dev: Device to be set up for translation - * @gpgd: FLPTPTR: First Level Page translation pointer in GPA - * @pasid: PASID to be programmed in the device PASID table - * @pasid_data: Additional PASID info from the guest bind request - * @domain: Domain info for setting up second level page tables - * @addr_width: Address width of the first level (guest) - */ -int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, - pgd_t *gpgd, u32 pasid, - struct iommu_gpasid_bind_data_vtd *pasid_data, - struct dmar_domain *domain, int addr_width) -{ - struct pasid_entry *pte; - struct dma_pte *pgd; - int ret = 0; - u64 pgd_val; - int agaw; - u16 did; - - if (!ecap_nest(iommu->ecap)) { - pr_err_ratelimited("IOMMU: %s: No nested translation support\n", - iommu->name); - return -EINVAL; - } - - if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) { - pr_err_ratelimited("Domain is not in nesting mode, %x\n", - domain->flags); - return -EINVAL; - } - - pte = intel_pasid_get_entry(dev, pasid); - if (WARN_ON(!pte)) - return -EINVAL; - - /* - * Caller must ensure PASID entry is not in use, i.e. not bind the - * same PASID to the same device twice. - */ - if (pasid_pte_is_present(pte)) - return -EBUSY; - - pasid_clear_entry(pte); - - /* Sanity checking performed by caller to make sure address - * width matching in two dimensions: - * 1. CPU vs. IOMMU - * 2. Guest vs. Host. - */ - switch (addr_width) { -#ifdef CONFIG_X86 - case ADDR_WIDTH_5LEVEL: - if (!cpu_feature_enabled(X86_FEATURE_LA57) || - !cap_5lp_support(iommu->cap)) { - dev_err_ratelimited(dev, - "5-level paging not supported\n"); - return -EINVAL; - } - - pasid_set_flpm(pte, 1); - break; -#endif - case ADDR_WIDTH_4LEVEL: - pasid_set_flpm(pte, 0); - break; - default: - dev_err_ratelimited(dev, "Invalid guest address width %d\n", - addr_width); - return -EINVAL; - } - - /* First level PGD is in GPA, must be supported by the second level */ - if ((uintptr_t)gpgd > domain->max_addr) { - dev_err_ratelimited(dev, - "Guest PGD %lx not supported, max %llx\n", - (uintptr_t)gpgd, domain->max_addr); - return -EINVAL; - } - pasid_set_flptr(pte, (uintptr_t)gpgd); - - ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data); - if (ret) - return ret; - - /* Setup the second level based on the given domain */ - pgd = domain->pgd; - - agaw = iommu_skip_agaw(domain, iommu, &pgd); - if (agaw < 0) { - dev_err_ratelimited(dev, "Invalid domain page table\n"); - return -EINVAL; - } - pgd_val = virt_to_phys(pgd); - pasid_set_slptr(pte, pgd_val); - pasid_set_fault_enable(pte); - - did = domain->iommu_did[iommu->seq_id]; - pasid_set_domain_id(pte, did); - - pasid_set_address_width(pte, agaw); - pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - - pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED); - pasid_set_present(pte); - pasid_flush_caches(iommu, pte, pasid, did); - - return ret; -} diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index d5552e2c160d..ab4408c824a5 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -118,10 +118,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); -int intel_pasid_setup_nested(struct intel_iommu *iommu, - struct device *dev, pgd_t *pgd, u32 pasid, - struct iommu_gpasid_bind_data_vtd *pasid_data, - struct dmar_domain *domain, int addr_width); void intel_pasid_tear_down_entry(struct 
intel_iommu *iommu, struct device *dev, u32 pasid, bool fault_ignore); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 5b5d69b04fcc..d04c83dd3a58 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -318,193 +318,6 @@ out: return 0; } -int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, - struct iommu_gpasid_bind_data *data) -{ - struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); - struct intel_svm_dev *sdev = NULL; - struct dmar_domain *dmar_domain; - struct device_domain_info *info; - struct intel_svm *svm = NULL; - unsigned long iflags; - int ret = 0; - - if (WARN_ON(!iommu) || !data) - return -EINVAL; - - if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD) - return -EINVAL; - - /* IOMMU core ensures argsz is more than the start of the union */ - if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd)) - return -EINVAL; - - /* Make sure no undefined flags are used in vendor data */ - if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1)) - return -EINVAL; - - if (!dev_is_pci(dev)) - return -ENOTSUPP; - - /* VT-d supports devices with full 20 bit PASIDs only */ - if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX) - return -EINVAL; - - /* - * We only check host PASID range, we have no knowledge to check - * guest PASID range. - */ - if (data->hpasid <= 0 || data->hpasid >= PASID_MAX) - return -EINVAL; - - info = get_domain_info(dev); - if (!info) - return -EINVAL; - - dmar_domain = to_dmar_domain(domain); - - mutex_lock(&pasid_mutex); - ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev); - if (ret) - goto out; - - if (sdev) { - /* - * Do not allow multiple bindings of the same device-PASID since - * there is only one SL page tables per PASID. We may revisit - * once sharing PGD across domains are supported. - */ - dev_warn_ratelimited(dev, "Already bound with PASID %u\n", - svm->pasid); - ret = -EBUSY; - goto out; - } - - if (!svm) { - /* We come here when PASID has never been bond to a device. */ - svm = kzalloc(sizeof(*svm), GFP_KERNEL); - if (!svm) { - ret = -ENOMEM; - goto out; - } - /* REVISIT: upper layer/VFIO can track host process that bind - * the PASID. ioasid_set = mm might be sufficient for vfio to - * check pasid VMM ownership. We can drop the following line - * once VFIO and IOASID set check is in place. - */ - svm->mm = get_task_mm(current); - svm->pasid = data->hpasid; - if (data->flags & IOMMU_SVA_GPASID_VAL) { - svm->gpasid = data->gpasid; - svm->flags |= SVM_FLAG_GUEST_PASID; - } - pasid_private_add(data->hpasid, svm); - INIT_LIST_HEAD_RCU(&svm->devs); - mmput(svm->mm); - } - sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); - if (!sdev) { - ret = -ENOMEM; - goto out; - } - sdev->dev = dev; - sdev->sid = PCI_DEVID(info->bus, info->devfn); - sdev->iommu = iommu; - - /* Only count users if device has aux domains */ - if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) - sdev->users = 1; - - /* Set up device context entry for PASID if not enabled already */ - ret = intel_iommu_enable_pasid(iommu, sdev->dev); - if (ret) { - dev_err_ratelimited(dev, "Failed to enable PASID capability\n"); - kfree(sdev); - goto out; - } - - /* - * PASID table is per device for better security. Therefore, for - * each bind of a new device even with an existing PASID, we need to - * call the nested mode setup function here. 
- */ - spin_lock_irqsave(&iommu->lock, iflags); - ret = intel_pasid_setup_nested(iommu, dev, - (pgd_t *)(uintptr_t)data->gpgd, - data->hpasid, &data->vendor.vtd, dmar_domain, - data->addr_width); - spin_unlock_irqrestore(&iommu->lock, iflags); - if (ret) { - dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n", - data->hpasid, ret); - /* - * PASID entry should be in cleared state if nested mode - * set up failed. So we only need to clear IOASID tracking - * data such that free call will succeed. - */ - kfree(sdev); - goto out; - } - - svm->flags |= SVM_FLAG_GUEST_MODE; - - init_rcu_head(&sdev->rcu); - list_add_rcu(&sdev->list, &svm->devs); - out: - if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) { - pasid_private_remove(data->hpasid); - kfree(svm); - } - - mutex_unlock(&pasid_mutex); - return ret; -} - -int intel_svm_unbind_gpasid(struct device *dev, u32 pasid) -{ - struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); - struct intel_svm_dev *sdev; - struct intel_svm *svm; - int ret; - - if (WARN_ON(!iommu)) - return -EINVAL; - - mutex_lock(&pasid_mutex); - ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); - if (ret) - goto out; - - if (sdev) { - if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) - sdev->users--; - if (!sdev->users) { - list_del_rcu(&sdev->list); - intel_pasid_tear_down_entry(iommu, dev, - svm->pasid, false); - intel_svm_drain_prq(dev, svm->pasid); - kfree_rcu(sdev, rcu); - - if (list_empty(&svm->devs)) { - /* - * We do not free the IOASID here in that - * IOMMU driver did not allocate it. - * Unlike native SVM, IOASID for guest use was - * allocated prior to the bind call. - * In any case, if the free call comes before - * the unbind, IOMMU driver will get notified - * and perform cleanup. - */ - pasid_private_remove(pasid); - kfree(svm); - } - } - } -out: - mutex_unlock(&pasid_mutex); - return ret; -} - static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm, unsigned int flags) { @@ -1125,28 +938,6 @@ int intel_svm_page_response(struct device *dev, goto out; } - /* - * For responses from userspace, need to make sure that the - * pasid has been bound to its mm. - */ - if (svm->flags & SVM_FLAG_GUEST_MODE) { - struct mm_struct *mm; - - mm = get_task_mm(current); - if (!mm) { - ret = -EINVAL; - goto out; - } - - if (mm != svm->mm) { - ret = -ENODEV; - mmput(mm); - goto out; - } - - mmput(mm); - } - /* * Per VT-d spec. v3.0 ch7.7, system software must respond * with page group response if private data is present (PDP) diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 69230fd695ea..beaacb589abd 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -525,12 +525,6 @@ struct context_entry { */ #define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1) -/* - * Domain represents a virtual machine which demands iommu nested - * translation mode support. 
- */ -#define DOMAIN_FLAG_NESTING_MODE BIT(2) - struct dmar_domain { int nid; /* node id */ @@ -765,9 +759,6 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); extern void intel_svm_check(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); -int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, - struct iommu_gpasid_bind_data *data); -int intel_svm_unbind_gpasid(struct device *dev, u32 pasid); struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata); void intel_svm_unbind(struct iommu_sva *handle); @@ -795,7 +786,6 @@ struct intel_svm { unsigned int flags; u32 pasid; - int gpasid; /* In case that guest PASID is different from host PASID */ struct list_head devs; }; #else diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 1b73bab7eeff..b3b125b332aa 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -25,17 +25,5 @@ * do such IOTLB flushes automatically. */ #define SVM_FLAG_SUPERVISOR_MODE BIT(0) -/* - * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest - * processes. Compared to the host bind, the primary differences are: - * 1. mm life cycle management - * 2. fault reporting - */ -#define SVM_FLAG_GUEST_MODE BIT(1) -/* - * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, - * which requires guest and host PASID translation at both directions. - */ -#define SVM_FLAG_GUEST_PASID BIT(2) #endif /* __INTEL_SVM_H__ */ -- cgit v1.2.3-59-g8ed1b From 0c9f178778919bb500443f340647b44227778fb2 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:42 +0800 Subject: iommu: Remove guest pasid related interfaces and definitions The guest pasid related uapi interfaces and definitions are not referenced anywhere in the tree. We've also reached a consensus to replace them with a new iommufd design. Remove them to avoid dead code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 210 --------------------------------------------- include/linux/iommu.h | 44 ---------- include/uapi/linux/iommu.h | 181 -------------------------------------- 3 files changed, 435 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 107dcf5938d6..3cbf4781e5bd 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2031,216 +2031,6 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) return 0; } -/* - * Check flags and other user provided data for valid combinations. We also - * make sure no reserved fields or unused flags are set. This is to ensure - * not breaking userspace in the future when these fields or flags are used. 
- */ -static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info) -{ - u32 mask; - int i; - - if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1) - return -EINVAL; - - mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1; - if (info->cache & ~mask) - return -EINVAL; - - if (info->granularity >= IOMMU_INV_GRANU_NR) - return -EINVAL; - - switch (info->granularity) { - case IOMMU_INV_GRANU_ADDR: - if (info->cache & IOMMU_CACHE_INV_TYPE_PASID) - return -EINVAL; - - mask = IOMMU_INV_ADDR_FLAGS_PASID | - IOMMU_INV_ADDR_FLAGS_ARCHID | - IOMMU_INV_ADDR_FLAGS_LEAF; - - if (info->granu.addr_info.flags & ~mask) - return -EINVAL; - break; - case IOMMU_INV_GRANU_PASID: - mask = IOMMU_INV_PASID_FLAGS_PASID | - IOMMU_INV_PASID_FLAGS_ARCHID; - if (info->granu.pasid_info.flags & ~mask) - return -EINVAL; - - break; - case IOMMU_INV_GRANU_DOMAIN: - if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) - return -EINVAL; - break; - default: - return -EINVAL; - } - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(info->padding); i++) { - if (info->padding[i]) - return -EINVAL; - } - - return 0; -} - -int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, - void __user *uinfo) -{ - struct iommu_cache_invalidate_info inv_info = { 0 }; - u32 minsz; - int ret; - - if (unlikely(!domain->ops->cache_invalidate)) - return -ENODEV; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. - */ - minsz = offsetof(struct iommu_cache_invalidate_info, granu); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(&inv_info, uinfo, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (inv_info.argsz < minsz) - return -EINVAL; - - /* PASID and address granu require additional info beyond minsz */ - if (inv_info.granularity == IOMMU_INV_GRANU_PASID && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info)) - return -EINVAL; - - if (inv_info.granularity == IOMMU_INV_GRANU_ADDR && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info)) - return -EINVAL; - - /* - * User might be using a newer UAPI header which has a larger data - * size, we shall support the existing flags within the current - * size. Copy the remaining user data _after_ minsz but not more - * than the current kernel supported size. 
- */ - if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, - min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) - return -EFAULT; - - /* Now the argsz is validated, check the content */ - ret = iommu_check_cache_invl_data(&inv_info); - if (ret) - return ret; - - return domain->ops->cache_invalidate(domain, dev, &inv_info); -} -EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); - -static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) -{ - u64 mask; - int i; - - if (data->version != IOMMU_GPASID_BIND_VERSION_1) - return -EINVAL; - - /* Check the range of supported formats */ - if (data->format >= IOMMU_PASID_FORMAT_LAST) - return -EINVAL; - - /* Check all flags */ - mask = IOMMU_SVA_GPASID_VAL; - if (data->flags & ~mask) - return -EINVAL; - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(data->padding); i++) { - if (data->padding[i]) - return -EINVAL; - } - - return 0; -} - -static int iommu_sva_prepare_bind_data(void __user *udata, - struct iommu_gpasid_bind_data *data) -{ - u32 minsz; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. - */ - minsz = offsetof(struct iommu_gpasid_bind_data, vendor); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(data, udata, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (data->argsz < minsz) - return -EINVAL; - /* - * User might be using a newer UAPI header, we shall let IOMMU vendor - * driver decide on what size it needs. Since the guest PASID bind data - * can be vendor specific, larger argsz could be the result of extension - * for one vendor but it should not affect another vendor. - * Copy the remaining user data _after_ minsz - */ - if (copy_from_user((void *)data + minsz, udata + minsz, - min_t(u32, data->argsz, sizeof(*data)) - minsz)) - return -EFAULT; - - return iommu_check_bind_data(data); -} - -int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return domain->ops->sva_bind_gpasid(domain, dev, &data); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); - -int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - ioasid_t pasid) -{ - if (unlikely(!domain->ops->sva_unbind_gpasid)) - return -ENODEV; - - return domain->ops->sva_unbind_gpasid(dev, pasid); -} -EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); - -int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); - static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index de0c57a567c8..cde1d0aad60f 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -229,9 +229,6 @@ struct iommu_iotlb_gather { * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response - * @cache_invalidate: invalidate 
translation caches - * @sva_bind_gpasid: bind guest pasid and mm - * @sva_unbind_gpasid: unbind guest pasid and mm * @def_domain_type: device default domain type, return value: * - IOMMU_DOMAIN_IDENTITY: must use an identity domain * - IOMMU_DOMAIN_DMA: must use a dma domain @@ -301,12 +298,6 @@ struct iommu_ops { int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); - int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info); - int (*sva_bind_gpasid)(struct iommu_domain *domain, - struct device *dev, struct iommu_gpasid_bind_data *data); - - int (*sva_unbind_gpasid)(struct device *dev, u32 pasid); int (*def_domain_type)(struct device *dev); @@ -421,14 +412,6 @@ extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); -extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - void __user *uinfo); - -extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); -extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); @@ -1051,33 +1034,6 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) return IOMMU_PASID_INVALID; } -static inline int -iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, - ioasid_t pasid) -{ - return -ENODEV; -} - static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { return NULL; diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index 59178fc229ca..65d8b0234f69 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -158,185 +158,4 @@ struct iommu_page_response { __u32 code; }; -/* defines the granularity of the invalidation */ -enum iommu_inv_granularity { - IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */ - IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */ - IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */ - IOMMU_INV_GRANU_NR, /* number of invalidation granularities */ -}; - -/** - * struct iommu_inv_addr_info - Address Selective Invalidation Structure - * - * @flags: indicates the granularity of the address-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If ARCHID bit is set, @archid is populated and the invalidation relates - * to cache entries tagged with this architecture specific ID and matching - * the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - If neither PASID or ARCHID is set, global addr invalidation applies. 
- * - The LEAF flag indicates whether only the leaf PTE caching needs to be - * invalidated and other paging structure caches can be preserved. - * @pasid: process address space ID - * @archid: architecture-specific ID - * @addr: first stage/level input address - * @granule_size: page/block size of the mapping in bytes - * @nb_granules: number of contiguous granules to be invalidated - */ -struct iommu_inv_addr_info { -#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0) -#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1) -#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2) - __u32 flags; - __u32 archid; - __u64 pasid; - __u64 addr; - __u64 granule_size; - __u64 nb_granules; -}; - -/** - * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure - * - * @flags: indicates the granularity of the PASID-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If the ARCHID bit is set, the @archid is populated and the invalidation - * relates to cache entries tagged with this architecture specific ID and - * matching the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - At least one of PASID or ARCHID must be set. - * @pasid: process address space ID - * @archid: architecture-specific ID - */ -struct iommu_inv_pasid_info { -#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0) -#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1) - __u32 flags; - __u32 archid; - __u64 pasid; -}; - -/** - * struct iommu_cache_invalidate_info - First level/stage invalidation - * information - * @argsz: User filled size of this data - * @version: API version of this structure - * @cache: bitfield that allows to select which caches to invalidate - * @granularity: defines the lowest granularity used for the invalidation: - * domain > PASID > addr - * @padding: reserved for future use (should be zero) - * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID - * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR - * - * Not all the combinations of cache/granularity are valid: - * - * +--------------+---------------+---------------+---------------+ - * | type / | DEV_IOTLB | IOTLB | PASID | - * | granularity | | | cache | - * +==============+===============+===============+===============+ - * | DOMAIN | N/A | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | PASID | Y | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | ADDR | Y | Y | N/A | - * +--------------+---------------+---------------+---------------+ - * - * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than - * @version and @cache. - * - * If multiple cache types are invalidated simultaneously, they all - * must support the used granularity. 
- */ -struct iommu_cache_invalidate_info { - __u32 argsz; -#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1 - __u32 version; -/* IOMMU paging structure cache */ -#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */ -#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */ -#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */ -#define IOMMU_CACHE_INV_TYPE_NR (3) - __u8 cache; - __u8 granularity; - __u8 padding[6]; - union { - struct iommu_inv_pasid_info pasid_info; - struct iommu_inv_addr_info addr_info; - } granu; -}; - -/** - * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest - * SVA binding. - * - * @flags: VT-d PASID table entry attributes - * @pat: Page attribute table data to compute effective memory type - * @emt: Extended memory type - * - * Only guest vIOMMU selectable and effective options are passed down to - * the host IOMMU. - */ -struct iommu_gpasid_bind_data_vtd { -#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */ -#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */ -#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */ -#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */ -#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_WPE (1 << 6) /* Write protect enable */ -#define IOMMU_SVA_VTD_GPASID_LAST (1 << 7) - __u64 flags; - __u32 pat; - __u32 emt; -}; - -#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \ - IOMMU_SVA_VTD_GPASID_EMTE | \ - IOMMU_SVA_VTD_GPASID_PCD | \ - IOMMU_SVA_VTD_GPASID_PWT) - -/** - * struct iommu_gpasid_bind_data - Information about device and guest PASID binding - * @argsz: User filled size of this data - * @version: Version of this data structure - * @format: PASID table entry format - * @flags: Additional information on guest bind request - * @gpgd: Guest page directory base of the guest mm to bind - * @hpasid: Process address space ID used for the guest mm in host IOMMU - * @gpasid: Process address space ID used for the guest mm in guest IOMMU - * @addr_width: Guest virtual address width - * @padding: Reserved for future use (should be zero) - * @vtd: Intel VT-d specific data - * - * Guest to host PASID mapping can be an identity or non-identity, where guest - * has its own PASID space. For non-identify mapping, guest to host PASID lookup - * is needed when VM programs guest PASID into an assigned device. VMM may - * trap such PASID programming then request host IOMMU driver to convert guest - * PASID to host PASID based on this bind data. - */ -struct iommu_gpasid_bind_data { - __u32 argsz; -#define IOMMU_GPASID_BIND_VERSION_1 1 - __u32 version; -#define IOMMU_PASID_FORMAT_INTEL_VTD 1 -#define IOMMU_PASID_FORMAT_LAST 2 - __u32 format; - __u32 addr_width; -#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */ - __u64 flags; - __u64 gpgd; - __u64 hpasid; - __u64 gpasid; - __u8 padding[8]; - /* Vendor specific data */ - union { - struct iommu_gpasid_bind_data_vtd vtd; - } vendor; -}; - #endif /* _UAPI_IOMMU_H */ -- cgit v1.2.3-59-g8ed1b From 241469685d8d54075f12a6f3eb0628f85e13ff37 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:43 +0800 Subject: iommu/vt-d: Remove aux-domain related callbacks The aux-domain related callbacks are not called in the tree. Remove them to avoid dead code. 
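For illustration, the shape of the first-level TLB flush before and after this removal, condensed from the hunks that follow (illustrative sketch only, not part of the applied diff):

    /* Removed helper: with aux-domains gone there is no default_pasid
     * left to consider, so the helper collapses into its callers. */
    static void domain_flush_piotlb(struct intel_iommu *iommu,
                                    struct dmar_domain *domain,
                                    u64 addr, unsigned long npages, bool ih)
    {
            u16 did = domain->iommu_did[iommu->seq_id];

            if (domain->default_pasid)              /* aux-domain traffic */
                    qi_flush_piotlb(iommu, did, domain->default_pasid,
                                    addr, npages, ih);
            if (!list_empty(&domain->devices))      /* ordinary traffic */
                    qi_flush_piotlb(iommu, did, PASID_RID2PASID,
                                    addr, npages, ih);
    }

    /* After the patch, callers issue the single remaining flush directly: */
    qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);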
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-4-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/debugfs.c | 3 +- drivers/iommu/intel/iommu.c | 309 +----------------------------------------- include/linux/intel-iommu.h | 17 --- 3 files changed, 3 insertions(+), 326 deletions(-) diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index 62e23ff3c987..db7a0ca73626 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -351,8 +351,7 @@ static int show_device_domain_translation(struct device *dev, void *data) if (!domain) return 0; - seq_printf(m, "Device %s with pasid %d @0x%llx\n", - dev_name(dev), domain->default_pasid, + seq_printf(m, "Device %s @0x%llx\n", dev_name(dev), (u64)virt_to_phys(domain->pgd)); seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n"); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 4f9d95067c8f..2b5f4e57a8bb 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1573,18 +1573,6 @@ static void domain_update_iotlb(struct dmar_domain *domain) break; } - if (!has_iotlb_device) { - struct subdev_domain_info *sinfo; - - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - info = get_domain_info(sinfo->pdev); - if (info && info->ats_enabled) { - has_iotlb_device = true; - break; - } - } - } - domain->has_iotlb_device = has_iotlb_device; } @@ -1682,7 +1670,6 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, { unsigned long flags; struct device_domain_info *info; - struct subdev_domain_info *sinfo; if (!domain->has_iotlb_device) return; @@ -1691,27 +1678,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, list_for_each_entry(info, &domain->devices, link) __iommu_flush_dev_iotlb(info, addr, mask); - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - info = get_domain_info(sinfo->pdev); - __iommu_flush_dev_iotlb(info, addr, mask); - } spin_unlock_irqrestore(&device_domain_lock, flags); } -static void domain_flush_piotlb(struct intel_iommu *iommu, - struct dmar_domain *domain, - u64 addr, unsigned long npages, bool ih) -{ - u16 did = domain->iommu_did[iommu->seq_id]; - - if (domain->default_pasid) - qi_flush_piotlb(iommu, did, domain->default_pasid, - addr, npages, ih); - - if (!list_empty(&domain->devices)) - qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); -} - static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages, @@ -1727,7 +1696,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, ih = 1 << 6; if (domain_use_first_level(domain)) { - domain_flush_piotlb(iommu, domain, addr, pages, ih); + qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); } else { /* * Fallback to domain selective flush if no PSI support or @@ -1776,7 +1745,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) u16 did = dmar_domain->iommu_did[iommu->seq_id]; if (domain_use_first_level(dmar_domain)) - domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0); + qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0); else iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); @@ -1983,7 +1952,6 @@ static struct dmar_domain *alloc_domain(unsigned int type) domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; domain->has_iotlb_device = false; INIT_LIST_HEAD(&domain->devices); - 
INIT_LIST_HEAD(&domain->subdevices); return domain; } @@ -2676,8 +2644,6 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, info->domain = domain; info->iommu = iommu; info->pasid_table = NULL; - info->auxd_enabled = 0; - INIT_LIST_HEAD(&info->subdevices); if (dev && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(info->dev); @@ -4637,183 +4603,6 @@ static void intel_iommu_domain_free(struct iommu_domain *domain) domain_exit(to_dmar_domain(domain)); } -/* - * Check whether a @domain could be attached to the @dev through the - * aux-domain attach/detach APIs. - */ -static inline bool -is_aux_domain(struct device *dev, struct iommu_domain *domain) -{ - struct device_domain_info *info = get_domain_info(dev); - - return info && info->auxd_enabled && - domain->type == IOMMU_DOMAIN_UNMANAGED; -} - -static inline struct subdev_domain_info * -lookup_subdev_info(struct dmar_domain *domain, struct device *dev) -{ - struct subdev_domain_info *sinfo; - - if (!list_empty(&domain->subdevices)) { - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - if (sinfo->pdev == dev) - return sinfo; - } - } - - return NULL; -} - -static int auxiliary_link_device(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info = get_domain_info(dev); - struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); - - assert_spin_locked(&device_domain_lock); - if (WARN_ON(!info)) - return -EINVAL; - - if (!sinfo) { - sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC); - if (!sinfo) - return -ENOMEM; - sinfo->domain = domain; - sinfo->pdev = dev; - list_add(&sinfo->link_phys, &info->subdevices); - list_add(&sinfo->link_domain, &domain->subdevices); - } - - return ++sinfo->users; -} - -static int auxiliary_unlink_device(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info = get_domain_info(dev); - struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); - int ret; - - assert_spin_locked(&device_domain_lock); - if (WARN_ON(!info || !sinfo || sinfo->users <= 0)) - return -EINVAL; - - ret = --sinfo->users; - if (!ret) { - list_del(&sinfo->link_phys); - list_del(&sinfo->link_domain); - kfree(sinfo); - } - - return ret; -} - -static int aux_domain_add_dev(struct dmar_domain *domain, - struct device *dev) -{ - int ret; - unsigned long flags; - struct intel_iommu *iommu; - - iommu = device_to_iommu(dev, NULL, NULL); - if (!iommu) - return -ENODEV; - - if (domain->default_pasid <= 0) { - u32 pasid; - - /* No private data needed for the default pasid */ - pasid = ioasid_alloc(NULL, PASID_MIN, - pci_max_pasids(to_pci_dev(dev)) - 1, - NULL); - if (pasid == INVALID_IOASID) { - pr_err("Can't allocate default pasid\n"); - return -ENODEV; - } - domain->default_pasid = pasid; - } - - spin_lock_irqsave(&device_domain_lock, flags); - ret = auxiliary_link_device(domain, dev); - if (ret <= 0) - goto link_failed; - - /* - * Subdevices from the same physical device can be attached to the - * same domain. For such cases, only the first subdevice attachment - * needs to go through the full steps in this function. So if ret > - * 1, just goto out. - */ - if (ret > 1) - goto out; - - /* - * iommu->lock must be held to attach domain to iommu and setup the - * pasid entry for second level translation. 
- */ - spin_lock(&iommu->lock); - ret = domain_attach_iommu(domain, iommu); - if (ret) - goto attach_failed; - - /* Setup the PASID entry for mediated devices: */ - if (domain_use_first_level(domain)) - ret = domain_setup_first_level(iommu, domain, dev, - domain->default_pasid); - else - ret = intel_pasid_setup_second_level(iommu, domain, dev, - domain->default_pasid); - if (ret) - goto table_failed; - - spin_unlock(&iommu->lock); -out: - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; - -table_failed: - domain_detach_iommu(domain, iommu); -attach_failed: - spin_unlock(&iommu->lock); - auxiliary_unlink_device(domain, dev); -link_failed: - spin_unlock_irqrestore(&device_domain_lock, flags); - if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); - - return ret; -} - -static void aux_domain_remove_dev(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - - if (!is_aux_domain(dev, &domain->domain)) - return; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - iommu = info->iommu; - - if (!auxiliary_unlink_device(domain, dev)) { - spin_lock(&iommu->lock); - intel_pasid_tear_down_entry(iommu, dev, - domain->default_pasid, false); - domain_detach_iommu(domain, iommu); - spin_unlock(&iommu->lock); - } - - spin_unlock_irqrestore(&device_domain_lock, flags); - - if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); -} - static int prepare_domain_attach_device(struct iommu_domain *domain, struct device *dev) { @@ -4866,9 +4655,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return -EPERM; } - if (is_aux_domain(dev, domain)) - return -EPERM; - /* normally dev is not mapped */ if (unlikely(domain_context_mapped(dev))) { struct dmar_domain *old_domain; @@ -4885,33 +4671,12 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return domain_add_dev_info(to_dmar_domain(domain), dev); } -static int intel_iommu_aux_attach_device(struct iommu_domain *domain, - struct device *dev) -{ - int ret; - - if (!is_aux_domain(dev, domain)) - return -EPERM; - - ret = prepare_domain_attach_device(domain, dev); - if (ret) - return ret; - - return aux_domain_add_dev(to_dmar_domain(domain), dev); -} - static void intel_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { dmar_remove_one_dev_info(dev); } -static void intel_iommu_aux_detach_device(struct iommu_domain *domain, - struct device *dev) -{ - aux_domain_remove_dev(to_dmar_domain(domain), dev); -} - static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, size_t size, int iommu_prot, gfp_t gfp) @@ -5205,46 +4970,6 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) return generic_device_group(dev); } -static int intel_iommu_enable_auxd(struct device *dev) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - int ret; - - iommu = device_to_iommu(dev, NULL, NULL); - if (!iommu || dmar_disabled) - return -EINVAL; - - if (!sm_supported(iommu) || !pasid_supported(iommu)) - return -EINVAL; - - ret = intel_iommu_enable_pasid(iommu, dev); - if (ret) - return -ENODEV; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - info->auxd_enabled = 1; - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - -static int intel_iommu_disable_auxd(struct 
device *dev) -{ - struct device_domain_info *info; - unsigned long flags; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - if (!WARN_ON(!info)) - info->auxd_enabled = 0; - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - static int intel_iommu_enable_sva(struct device *dev) { struct device_domain_info *info = get_domain_info(dev); @@ -5301,9 +5026,6 @@ static int intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) { switch (feat) { - case IOMMU_DEV_FEAT_AUX: - return intel_iommu_enable_auxd(dev); - case IOMMU_DEV_FEAT_IOPF: return intel_iommu_enable_iopf(dev); @@ -5319,9 +5041,6 @@ static int intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) { switch (feat) { - case IOMMU_DEV_FEAT_AUX: - return intel_iommu_disable_auxd(dev); - case IOMMU_DEV_FEAT_IOPF: return 0; @@ -5333,26 +5052,6 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) } } -static bool -intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat) -{ - struct device_domain_info *info = get_domain_info(dev); - - if (feat == IOMMU_DEV_FEAT_AUX) - return scalable_mode_support() && info && info->auxd_enabled; - - return false; -} - -static int -intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - - return dmar_domain->default_pasid > 0 ? - dmar_domain->default_pasid : -EINVAL; -} - static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) { @@ -5397,9 +5096,6 @@ const struct iommu_ops intel_iommu_ops = { .domain_free = intel_iommu_domain_free, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, - .aux_attach_dev = intel_iommu_aux_attach_device, - .aux_detach_dev = intel_iommu_aux_detach_device, - .aux_get_pasid = intel_iommu_aux_get_pasid, .map_pages = intel_iommu_map_pages, .unmap_pages = intel_iommu_unmap_pages, .iotlb_sync_map = intel_iommu_iotlb_sync_map, @@ -5412,7 +5108,6 @@ const struct iommu_ops intel_iommu_ops = { .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .device_group = intel_iommu_device_group, - .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, .dev_disable_feat = intel_iommu_dev_disable_feat, .is_attach_deferred = intel_iommu_is_attach_deferred, diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index beaacb589abd..5cfda90b2cca 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -542,7 +542,6 @@ struct dmar_domain { u8 iommu_snooping: 1; /* indicate snooping control feature */ struct list_head devices; /* all devices' list */ - struct list_head subdevices; /* all subdevices' list */ struct iova_domain iovad; /* iova's that belong to this domain */ struct dma_pte *pgd; /* virtual address */ @@ -557,11 +556,6 @@ struct dmar_domain { 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ u64 max_addr; /* maximum mapped address */ - u32 default_pasid; /* - * The default pasid used for non-SVM - * traffic on mediated devices. 
- */ - struct iommu_domain domain; /* generic domain data structure for iommu core */ }; @@ -614,21 +608,11 @@ struct intel_iommu { void *perf_statistic; }; -/* Per subdevice private data */ -struct subdev_domain_info { - struct list_head link_phys; /* link to phys device siblings */ - struct list_head link_domain; /* link to domain siblings */ - struct device *pdev; /* physical device derived from */ - struct dmar_domain *domain; /* aux-domain */ - int users; /* user count */ -}; - /* PCI domain-device relationship */ struct device_domain_info { struct list_head link; /* link to domain siblings */ struct list_head global; /* link to global list */ struct list_head table; /* link to pasid table */ - struct list_head subdevices; /* subdevices sibling */ u32 segment; /* PCI segment number */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ @@ -639,7 +623,6 @@ struct device_domain_info { u8 pri_enabled:1; u8 ats_supported:1; u8 ats_enabled:1; - u8 auxd_enabled:1; /* Multiple domains per device */ u8 ats_qdep; struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ struct intel_iommu *iommu; /* IOMMU used by this device */ -- cgit v1.2.3-59-g8ed1b From 8652d875939b6a1fe6d5c93cf4fb91df61cda5a7 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:44 +0800 Subject: iommu: Remove aux-domain related interfaces and iommu_ops The aux-domain related interfaces and iommu_ops are not referenced anywhere in the tree. We've also reached a consensus to redesign it based on the new iommufd framework. Remove them to avoid dead code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-5-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 46 ---------------------------------------------- include/linux/iommu.h | 29 ----------------------------- 2 files changed, 75 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3cbf4781e5bd..0ebaf561a70e 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2749,8 +2749,6 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature); /* * The device drivers should do the necessary cleanups before calling this. - * For example, before disabling the aux-domain feature, the device driver - * should detach all aux-domains. Otherwise, this will return -EBUSY. */ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) { @@ -2778,50 +2776,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) } EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled); -/* - * Aux-domain specific attach/detach. - * - * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns - * true. Also, as long as domains are attached to a device through this - * interface, any tries to call iommu_attach_device() should fail - * (iommu_detach_device() can't fail, so we fail when trying to re-attach). - * This should make us safe against a device being attached to a guest as a - * whole while there are still pasid users on it (aux and sva).
- */ -int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) -{ - int ret = -ENODEV; - - if (domain->ops->aux_attach_dev) - ret = domain->ops->aux_attach_dev(domain, dev); - - if (!ret) - trace_attach_device_to_domain(dev); - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_aux_attach_device); - -void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) -{ - if (domain->ops->aux_detach_dev) { - domain->ops->aux_detach_dev(domain, dev); - trace_detach_device_from_domain(dev); - } -} -EXPORT_SYMBOL_GPL(iommu_aux_detach_device); - -int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - int ret = -ENODEV; - - if (domain->ops->aux_get_pasid) - ret = domain->ops->aux_get_pasid(domain, dev); - - return ret; -} -EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); - /** * iommu_sva_bind_device() - Bind a process address space to a device * @dev: the device diff --git a/include/linux/iommu.h b/include/linux/iommu.h index cde1d0aad60f..9983a01373b2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -144,7 +144,6 @@ struct iommu_resv_region { /** * enum iommu_dev_features - Per device IOMMU features - * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally * enabling %IOMMU_DEV_FEAT_SVA requires @@ -157,7 +156,6 @@ struct iommu_resv_region { * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). */ enum iommu_dev_features { - IOMMU_DEV_FEAT_AUX, IOMMU_DEV_FEAT_SVA, IOMMU_DEV_FEAT_IOPF, }; @@ -223,8 +221,6 @@ struct iommu_iotlb_gather { * @dev_has/enable/disable_feat: per device entries to check/enable/disable * iommu specific features. * @dev_feat_enabled: check enabled feature - * @aux_attach/detach_dev: aux-domain specific attach/detach entries. 
- * @aux_get_pasid: get the pasid given an aux-domain * @sva_bind: Bind process address space to device * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response @@ -285,11 +281,6 @@ struct iommu_ops { int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); - /* Aux-domain specific attach/detach entries */ - int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev); - struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, void *drvdata); void (*sva_unbind)(struct iommu_sva *handle); @@ -655,9 +646,6 @@ void iommu_release_device(struct device *dev); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); -int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev); -void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); -int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, @@ -1002,23 +990,6 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) return -ENODEV; } -static inline int -iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - -static inline void -iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) -{ -} - -static inline int -iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) { -- cgit v1.2.3-59-g8ed1b From 71fe30698dc3be6fd277c49c0c7b220c7ae92ffd Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:45 +0800 Subject: iommu: Remove apply_resv_region The apply_resv_region callback in iommu_ops was introduced to reserve an IOVA range in the given DMA domain when the IOMMU driver manages the IOVA by itself. As all drivers have been converted to use dma-iommu in the core, there's no driver using this anymore. Remove it to avoid dead code.
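The reserved regions themselves still reach the core through the get_resv_regions() callback; only the per-driver IOVA reservation hook goes away. A condensed sketch of the core's direct-mapping path after this patch (the loop context around the hunk below is reconstructed, so treat the details as illustrative rather than a verbatim quote):

    /* iommu_create_device_direct_mappings(), condensed: each region the
     * driver reports is page-aligned and mapped 1:1 by the core itself,
     * with no ops->apply_resv_region() detour. */
    list_for_each_entry(entry, &mappings, list) {
            dma_addr_t start, end;

            start = ALIGN(entry->start, pg_size);
            end = ALIGN(entry->start + entry->length, pg_size);

            /* ... iommu_map() the [start, end) range 1:1 ... */
    }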
Suggested-by: Robin Murphy Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-6-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 3 --- include/linux/iommu.h | 4 ---- 2 files changed, 7 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 0ebaf561a70e..7cf073c56036 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -790,9 +790,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group, dma_addr_t start, end, addr; size_t map_size = 0; - if (domain->ops->apply_resv_region) - domain->ops->apply_resv_region(dev, domain, entry); - start = ALIGN(entry->start, pg_size); end = ALIGN(entry->start + entry->length, pg_size); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9983a01373b2..9ffa2e88f3d0 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -214,7 +214,6 @@ struct iommu_iotlb_gather { * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device - * @apply_resv_region: Temporary helper call-back for iova reserved ranges * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) @@ -268,9 +267,6 @@ struct iommu_ops { /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); void (*put_resv_regions)(struct device *dev, struct list_head *list); - void (*apply_resv_region)(struct device *dev, - struct iommu_domain *domain, - struct iommu_resv_region *region); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); -- cgit v1.2.3-59-g8ed1b From 7eef7f670086f06b3a461f1b4d1e84f793ed4861 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:46 +0800 Subject: drm/nouveau/device: Get right pgsize_bitmap of iommu_domain The supported page sizes of an iommu_domain are saved in the pgsize_bitmap field. Retrieve the value from the right place. Signed-off-by: Lu Baolu Reviewed-by: Robin Murphy Link: https://lore.kernel.org/r/20211218074546.1772553-1-baolu.lu@linux.intel.com Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-7-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index d0d52c1d4aee..992cc285f2fe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -133,7 +133,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) * or equal to the system's PAGE_SIZE, with a preference if * both are equal. 
*/ - pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap; + pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap; if (pgsize_bitmap & PAGE_SIZE) { tdev->iommu.pgshift = PAGE_SHIFT; } else { -- cgit v1.2.3-59-g8ed1b From 3f6634d997dba8140b3a7cba01776b9638d70dff Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:47 +0800 Subject: iommu: Use right way to retrieve iommu_ops The common iommu_ops is hooked to both device and domain. When a helper has both device and domain pointers, the way to get the iommu_ops looks messy in the iommu core. This sorts out the way to get iommu_ops. The device-related helpers go through the device pointer, while the domain-related ones go through the domain pointer. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-8-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 50 +++++++++++++++++++++++++------------------------- include/linux/iommu.h | 11 +++++++++++ 2 files changed, 36 insertions(+), 25 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7cf073c56036..7af0ee670deb 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -323,13 +323,14 @@ err_out: void iommu_release_device(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops; if (!dev->iommu) return; iommu_device_unlink(dev->iommu->iommu_dev, dev); + ops = dev_iommu_ops(dev); ops->release_device(dev); iommu_group_remove_device(dev); @@ -833,8 +834,10 @@ out: static bool iommu_is_attach_deferred(struct iommu_domain *domain, struct device *dev) { - if (domain->ops->is_attach_deferred) - return domain->ops->is_attach_deferred(domain, dev); + const struct iommu_ops *ops = dev_iommu_ops(dev); + + if (ops->is_attach_deferred) + return ops->is_attach_deferred(domain, dev); return false; } @@ -1252,10 +1255,10 @@ int iommu_page_response(struct device *dev, struct iommu_fault_event *evt; struct iommu_fault_page_request *prm; struct dev_iommu *param = dev->iommu; + const struct iommu_ops *ops = dev_iommu_ops(dev); bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - if (!domain || !domain->ops->page_response) + if (!ops->page_response) return -ENODEV; if (!param || !param->fault_param) @@ -1296,7 +1299,7 @@ int iommu_page_response(struct device *dev, msg->pasid = 0; } - ret = domain->ops->page_response(dev, evt, msg); + ret = ops->page_response(dev, evt, msg); list_del(&evt->list); kfree(evt); break; @@ -1521,7 +1524,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group); static int iommu_get_def_domain_type(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) return IOMMU_DOMAIN_DMA; @@ -1580,7 +1583,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group, */ static struct iommu_group *iommu_group_get_for_dev(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_group *group; int ret; @@ -1588,9 +1591,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (group) return group; - if (!ops) - return ERR_PTR(-EINVAL); - group = ops->device_group(dev); if (WARN_ON_ONCE(group == NULL)) return ERR_PTR(-EINVAL); @@ -1759,10 +1759,10 @@ static int
iommu_group_do_probe_finalize(struct device *dev, void *data) { - struct iommu_domain *domain = data; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (domain->ops->probe_finalize) - domain->ops->probe_finalize(dev); + if (ops->probe_finalize) + ops->probe_finalize(dev); return 0; } @@ -2020,7 +2020,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) return __iommu_attach_device(domain, dev); @@ -2579,17 +2579,17 @@ EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks); void iommu_get_resv_regions(struct device *dev, struct list_head *list) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (ops && ops->get_resv_regions) + if (ops->get_resv_regions) ops->get_resv_regions(dev, list); } void iommu_put_resv_regions(struct device *dev, struct list_head *list) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (ops && ops->put_resv_regions) + if (ops->put_resv_regions) ops->put_resv_regions(dev, list); } @@ -2794,9 +2794,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) { struct iommu_group *group; struct iommu_sva *handle = ERR_PTR(-EINVAL); - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (!ops || !ops->sva_bind) + if (!ops->sva_bind) return ERR_PTR(-ENODEV); group = iommu_group_get(dev); @@ -2837,9 +2837,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle) { struct iommu_group *group; struct device *dev = handle->dev; - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (!ops || !ops->sva_unbind) + if (!ops->sva_unbind) return; group = iommu_group_get(dev); @@ -2856,9 +2856,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device); u32 iommu_sva_get_pasid(struct iommu_sva *handle) { - const struct iommu_ops *ops = handle->dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(handle->dev); - if (!ops || !ops->sva_get_pasid) + if (!ops->sva_get_pasid) return IOMMU_PASID_INVALID; return ops->sva_get_pasid(handle); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9ffa2e88f3d0..90f1b5e3809d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -381,6 +381,17 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) }; } +static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) +{ + /* + * Assume that valid ops must be installed if iommu_probe_device() + * has succeeded. The device ops are essentially for internal use + * within the IOMMU subsystem itself, so we should be able to trust + * ourselves not to misuse the helper. + */ + return dev->iommu->iommu_dev->ops; +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ -- cgit v1.2.3-59-g8ed1b From 41bb23e70b50b89b6137cbecd37009d782454860 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:48 +0800 Subject: iommu: Remove unused argument in is_attach_deferred The is_attach_deferred iommu_ops callback is a device op. The domain argument is unnecessary and never used. Remove it to make code clean. 
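The change is mechanical; the callback goes from a domain-op signature to a plain device op, as the hunks below show:

    /* include/linux/iommu.h, before and after: */
    bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
    bool (*is_attach_deferred)(struct device *dev);

and callers shrink accordingly, e.g. (condensed from the iommu.c hunk below; not a verbatim quote of the patch):

    int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
    {
            if (iommu_is_attach_deferred(dev))
                    return __iommu_attach_device(domain, dev);

            return 0;
    }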
Suggested-by: Robin Murphy Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-9-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu.h | 3 +-- drivers/iommu/amd/iommu.c | 3 +-- drivers/iommu/amd/iommu_v2.c | 2 +- drivers/iommu/intel/iommu.c | 3 +-- drivers/iommu/iommu.c | 15 ++++++--------- include/linux/iommu.h | 2 +- 6 files changed, 11 insertions(+), 17 deletions(-) diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 416815a525d6..3b2f06b7aca6 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -116,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain) extern bool translation_pre_enabled(struct amd_iommu *iommu); -extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev); +extern bool amd_iommu_is_attach_deferred(struct device *dev); extern int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 461f1844ed1f..37f2fbb4b129 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2215,8 +2215,7 @@ static void amd_iommu_get_resv_regions(struct device *dev, list_add_tail(®ion->list, head); } -bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +bool amd_iommu_is_attach_deferred(struct device *dev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 58da08cc3d01..7c94ec05d289 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -537,7 +537,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) ret = NOTIFY_DONE; /* In kdump kernel pci dev is not initialized yet -> send INVALID */ - if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) { + if (amd_iommu_is_attach_deferred(&pdev->dev)) { amd_iommu_complete_ppr(pdev, iommu_fault->pasid, PPR_INVALID, tag); goto out; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 2b5f4e57a8bb..80f1294be634 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5052,8 +5052,7 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) } } -static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +static bool intel_iommu_is_attach_deferred(struct device *dev) { return attach_deferred(dev); } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 7af0ee670deb..27276421d253 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -831,13 +831,12 @@ out: return ret; } -static bool iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +static bool iommu_is_attach_deferred(struct device *dev) { const struct iommu_ops *ops = dev_iommu_ops(dev); if (ops->is_attach_deferred) - return ops->is_attach_deferred(domain, dev); + return ops->is_attach_deferred(dev); return false; } @@ -894,7 +893,7 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); - if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) + if (group->domain && !iommu_is_attach_deferred(dev)) ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); if (ret) @@ -1745,7 +1744,7 @@ static int iommu_group_do_dma_attach(struct device *dev, void *data) struct iommu_domain *domain = data; 
int ret = 0; - if (!iommu_is_attach_deferred(domain, dev)) + if (!iommu_is_attach_deferred(dev)) ret = __iommu_attach_device(domain, dev); return ret; @@ -2020,9 +2019,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) { - const struct iommu_ops *ops = dev_iommu_ops(dev); - - if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(dev)) return __iommu_attach_device(domain, dev); return 0; @@ -2031,7 +2028,7 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - if (iommu_is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(dev)) return; if (unlikely(domain->ops->detach_dev == NULL)) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 90f1b5e3809d..1ded6076a75c 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -269,7 +269,7 @@ struct iommu_ops { void (*put_resv_regions)(struct device *dev, struct list_head *list); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); - bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); + bool (*is_attach_deferred)(struct device *dev); /* Per device IOMMU features */ bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f); -- cgit v1.2.3-59-g8ed1b From 9a630a4b41a2639b65d024a5d2d88ed3ecca130a Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Wed, 16 Feb 2022 10:52:49 +0800 Subject: iommu: Split struct iommu_ops Move the domain specific operations out of struct iommu_ops into a new structure that only has domain specific operations. This solves the problem of needing to know if the method vector for a given operation needs to be retrieved from the device or the domain. Logically the domain ops are the ones that make sense for external subsystems and endpoint drivers to use, while device ops, with the sole exception of domain_alloc, are IOMMU API internals. 
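Condensed from the driver conversions and the iommu.c hunk below, the resulting split looks roughly like this (representative members only; signatures abbreviated, so treat this as a sketch rather than the full definition):

    struct iommu_ops {
            /* Device ops plus domain_alloc: IOMMU API internals. */
            struct iommu_domain *(*domain_alloc)(unsigned int iommu_domain_type);
            struct iommu_device *(*probe_device)(struct device *dev);
            void (*release_device)(struct device *dev);
            /* ... */
            const struct iommu_domain_ops *default_domain_ops;
    };

    struct iommu_domain_ops {
            /* Domain ops: what external subsystems and endpoint drivers use. */
            int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
            void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
            /* map/unmap, iotlb_sync, iova_to_phys, ... */
            void (*free)(struct iommu_domain *domain);
    };

    /* Core side (see the iommu.c hunk below): a driver may install
     * domain->ops itself at domain_alloc() time; otherwise the core
     * falls back to the bus-wide default. */
    if (!domain->ops)
            domain->ops = bus->iommu_ops->default_domain_ops;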
Signed-off-by: Lu Baolu Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220216025249.3459465-10-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 20 ++++--- drivers/iommu/apple-dart.c | 20 ++++--- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 18 +++--- drivers/iommu/arm/arm-smmu/arm-smmu.c | 20 ++++--- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 18 +++--- drivers/iommu/exynos-iommu.c | 14 +++-- drivers/iommu/fsl_pamu_domain.c | 10 ++-- drivers/iommu/intel/iommu.c | 20 ++++--- drivers/iommu/iommu.c | 19 +++--- drivers/iommu/ipmmu-vmsa.c | 18 +++--- drivers/iommu/msm_iommu.c | 30 +++++----- drivers/iommu/mtk_iommu.c | 20 ++++--- drivers/iommu/mtk_iommu_v1.c | 14 +++-- drivers/iommu/omap-iommu.c | 14 +++-- drivers/iommu/rockchip-iommu.c | 14 +++-- drivers/iommu/s390-iommu.c | 14 +++-- drivers/iommu/sprd-iommu.c | 18 +++--- drivers/iommu/sun50i-iommu.c | 18 +++--- drivers/iommu/tegra-gart.c | 18 +++--- drivers/iommu/tegra-smmu.c | 14 +++-- drivers/iommu/virtio-iommu.c | 14 +++-- include/linux/iommu.h | 91 +++++++++++++++++------------ 22 files changed, 256 insertions(+), 200 deletions(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 37f2fbb4b129..2bed113c4e6d 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2268,13 +2268,6 @@ static int amd_iommu_def_domain_type(struct device *dev) const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, - .domain_free = amd_iommu_domain_free, - .attach_dev = amd_iommu_attach_device, - .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map, - .iotlb_sync_map = amd_iommu_iotlb_sync_map, - .unmap = amd_iommu_unmap, - .iova_to_phys = amd_iommu_iova_to_phys, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, @@ -2283,9 +2276,18 @@ const struct iommu_ops amd_iommu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, - .flush_iotlb_all = amd_iommu_flush_iotlb_all, - .iotlb_sync = amd_iommu_iotlb_sync, .def_domain_type = amd_iommu_def_domain_type, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = amd_iommu_attach_device, + .detach_dev = amd_iommu_detach_device, + .map = amd_iommu_map, + .unmap = amd_iommu_unmap, + .iotlb_sync_map = amd_iommu_iotlb_sync_map, + .iova_to_phys = amd_iommu_iova_to_phys, + .flush_iotlb_all = amd_iommu_flush_iotlb_all, + .iotlb_sync = amd_iommu_iotlb_sync, + .free = amd_iommu_domain_free, + } }; /***************************************************************************** diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 565ef5598811..decafb07ad08 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev, static const struct iommu_ops apple_dart_iommu_ops = { .domain_alloc = apple_dart_domain_alloc, - .domain_free = apple_dart_domain_free, - .attach_dev = apple_dart_attach_dev, - .detach_dev = apple_dart_detach_dev, - .map_pages = apple_dart_map_pages, - .unmap_pages = apple_dart_unmap_pages, - .flush_iotlb_all = apple_dart_flush_iotlb_all, - .iotlb_sync = apple_dart_iotlb_sync, - .iotlb_sync_map = apple_dart_iotlb_sync_map, - .iova_to_phys = apple_dart_iova_to_phys, .probe_device = apple_dart_probe_device, .release_device = apple_dart_release_device, .device_group = 
apple_dart_device_group, @@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = { .get_resv_regions = apple_dart_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during dart probe */ + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = apple_dart_attach_dev, + .detach_dev = apple_dart_detach_dev, + .map_pages = apple_dart_map_pages, + .unmap_pages = apple_dart_unmap_pages, + .flush_iotlb_all = apple_dart_flush_iotlb_all, + .iotlb_sync = apple_dart_iotlb_sync, + .iotlb_sync_map = apple_dart_iotlb_sync_map, + .iova_to_phys = apple_dart_iova_to_phys, + .free = apple_dart_domain_free, + } }; static irqreturn_t apple_dart_irq(int irq, void *dev) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 6dc6d8b6b368..fd49282c03a3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2841,17 +2841,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev, static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, - .domain_free = arm_smmu_domain_free, - .attach_dev = arm_smmu_attach_dev, - .map_pages = arm_smmu_map_pages, - .unmap_pages = arm_smmu_unmap_pages, - .flush_iotlb_all = arm_smmu_flush_iotlb_all, - .iotlb_sync = arm_smmu_iotlb_sync, - .iova_to_phys = arm_smmu_iova_to_phys, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, - .enable_nesting = arm_smmu_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, @@ -2865,6 +2857,16 @@ static struct iommu_ops arm_smmu_ops = { .page_response = arm_smmu_page_response, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = arm_smmu_attach_dev, + .map_pages = arm_smmu_map_pages, + .unmap_pages = arm_smmu_unmap_pages, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, + .iotlb_sync = arm_smmu_iotlb_sync, + .iova_to_phys = arm_smmu_iova_to_phys, + .enable_nesting = arm_smmu_enable_nesting, + .free = arm_smmu_domain_free, + } }; /* Probing and initialisation functions */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 4bc75c4ce402..41321fb47152 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev) static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, - .domain_free = arm_smmu_domain_free, - .attach_dev = arm_smmu_attach_dev, - .map_pages = arm_smmu_map_pages, - .unmap_pages = arm_smmu_unmap_pages, - .flush_iotlb_all = arm_smmu_flush_iotlb_all, - .iotlb_sync = arm_smmu_iotlb_sync, - .iova_to_phys = arm_smmu_iova_to_phys, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .probe_finalize = arm_smmu_probe_finalize, .device_group = arm_smmu_device_group, - .enable_nesting = arm_smmu_enable_nesting, - .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = 
THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = arm_smmu_attach_dev, + .map_pages = arm_smmu_map_pages, + .unmap_pages = arm_smmu_unmap_pages, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, + .iotlb_sync = arm_smmu_iotlb_sync, + .iova_to_phys = arm_smmu_iova_to_phys, + .enable_nesting = arm_smmu_enable_nesting, + .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, + .free = arm_smmu_domain_free, + } }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index b91874cb6cf3..ed659de20661 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) static const struct iommu_ops qcom_iommu_ops = { .capable = qcom_iommu_capable, .domain_alloc = qcom_iommu_domain_alloc, - .domain_free = qcom_iommu_domain_free, - .attach_dev = qcom_iommu_attach_dev, - .detach_dev = qcom_iommu_detach_dev, - .map = qcom_iommu_map, - .unmap = qcom_iommu_unmap, - .flush_iotlb_all = qcom_iommu_flush_iotlb_all, - .iotlb_sync = qcom_iommu_iotlb_sync, - .iova_to_phys = qcom_iommu_iova_to_phys, .probe_device = qcom_iommu_probe_device, .release_device = qcom_iommu_release_device, .device_group = generic_device_group, .of_xlate = qcom_iommu_of_xlate, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = qcom_iommu_attach_dev, + .detach_dev = qcom_iommu_detach_dev, + .map = qcom_iommu_map, + .unmap = qcom_iommu_unmap, + .flush_iotlb_all = qcom_iommu_flush_iotlb_all, + .iotlb_sync = qcom_iommu_iotlb_sync, + .iova_to_phys = qcom_iommu_iova_to_phys, + .free = qcom_iommu_domain_free, + } }; static int qcom_iommu_sec_ptbl_init(struct device *dev) diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 939ffa768986..71f2018e23fe 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev, static const struct iommu_ops exynos_iommu_ops = { .domain_alloc = exynos_iommu_domain_alloc, - .domain_free = exynos_iommu_domain_free, - .attach_dev = exynos_iommu_attach_device, - .detach_dev = exynos_iommu_detach_device, - .map = exynos_iommu_map, - .unmap = exynos_iommu_unmap, - .iova_to_phys = exynos_iommu_iova_to_phys, .device_group = generic_device_group, .probe_device = exynos_iommu_probe_device, .release_device = exynos_iommu_release_device, .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, .of_xlate = exynos_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = exynos_iommu_attach_device, + .detach_dev = exynos_iommu_detach_device, + .map = exynos_iommu_map, + .unmap = exynos_iommu_unmap, + .iova_to_phys = exynos_iommu_iova_to_phys, + .free = exynos_iommu_domain_free, + } }; static int __init exynos_iommu_init(void) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index a47f47307109..69a4a62dc3b9 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev) static const struct iommu_ops fsl_pamu_ops = { .capable = fsl_pamu_capable, .domain_alloc = fsl_pamu_domain_alloc, - .domain_free = fsl_pamu_domain_free, - .attach_dev = fsl_pamu_attach_device, - .detach_dev = fsl_pamu_detach_device, - .iova_to_phys = 
fsl_pamu_iova_to_phys, .probe_device = fsl_pamu_probe_device, .release_device = fsl_pamu_release_device, .device_group = fsl_pamu_device_group, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = fsl_pamu_attach_device, + .detach_dev = fsl_pamu_detach_device, + .iova_to_phys = fsl_pamu_iova_to_phys, + .free = fsl_pamu_domain_free, + } }; int __init pamu_domain_init(void) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 80f1294be634..b549172e88ef 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5092,15 +5092,6 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain, const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, - .domain_free = intel_iommu_domain_free, - .attach_dev = intel_iommu_attach_device, - .detach_dev = intel_iommu_detach_device, - .map_pages = intel_iommu_map_pages, - .unmap_pages = intel_iommu_unmap_pages, - .iotlb_sync_map = intel_iommu_iotlb_sync_map, - .flush_iotlb_all = intel_flush_iotlb_all, - .iotlb_sync = intel_iommu_tlb_sync, - .iova_to_phys = intel_iommu_iova_to_phys, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, @@ -5118,6 +5109,17 @@ const struct iommu_ops intel_iommu_ops = { .sva_get_pasid = intel_svm_get_pasid, .page_response = intel_svm_page_response, #endif + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = intel_iommu_attach_device, + .detach_dev = intel_iommu_detach_device, + .map_pages = intel_iommu_map_pages, + .unmap_pages = intel_iommu_unmap_pages, + .iotlb_sync_map = intel_iommu_iotlb_sync_map, + .flush_iotlb_all = intel_flush_iotlb_all, + .iotlb_sync = intel_iommu_tlb_sync, + .iova_to_phys = intel_iommu_iova_to_phys, + .free = intel_iommu_domain_free, + } }; static void quirk_iommu_igfx(struct pci_dev *dev) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 27276421d253..f2c45b85b9fc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1950,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, if (!domain) return NULL; - domain->ops = bus->iommu_ops; domain->type = type; /* Assume all sizes by default; the driver may override this later */ - domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + if (!domain->ops) + domain->ops = bus->iommu_ops->default_domain_ops; if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { iommu_domain_free(domain); @@ -1971,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc); void iommu_domain_free(struct iommu_domain *domain) { iommu_put_dma_cookie(domain); - domain->ops->domain_free(domain); + domain->ops->free(domain); } EXPORT_SYMBOL_GPL(iommu_domain_free); @@ -2242,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp, size_t *mapped) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t pgsize, count; int ret; @@ -2265,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, static int __iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; unsigned long orig_iova = iova; unsigned int min_pagesz; 
size_t orig_size = size; @@ -2325,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova, static int _iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; int ret; ret = __iommu_map(domain, iova, paddr, size, prot, gfp); @@ -2354,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t pgsize, count; pgsize = iommu_pgsize(domain, iova, iova, size, &count); @@ -2367,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t unmapped_page, unmapped = 0; unsigned long orig_iova = iova; unsigned int min_pagesz; @@ -2443,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t len = 0, mapped = 0; phys_addr_t start; unsigned int i = 0; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ca752bdc710f..00d5f7a33499 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -868,14 +868,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev) static const struct iommu_ops ipmmu_ops = { .domain_alloc = ipmmu_domain_alloc, - .domain_free = ipmmu_domain_free, - .attach_dev = ipmmu_attach_device, - .detach_dev = ipmmu_detach_device, - .map = ipmmu_map, - .unmap = ipmmu_unmap, - .flush_iotlb_all = ipmmu_flush_iotlb_all, - .iotlb_sync = ipmmu_iotlb_sync, - .iova_to_phys = ipmmu_iova_to_phys, .probe_device = ipmmu_probe_device, .release_device = ipmmu_release_device, .probe_finalize = ipmmu_probe_finalize, @@ -883,6 +875,16 @@ static const struct iommu_ops ipmmu_ops = { ? generic_device_group : ipmmu_find_group, .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, .of_xlate = ipmmu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = ipmmu_attach_device, + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, + .flush_iotlb_all = ipmmu_flush_iotlb_all, + .iotlb_sync = ipmmu_iotlb_sync, + .iova_to_phys = ipmmu_iova_to_phys, + .free = ipmmu_domain_free, + } }; /* ----------------------------------------------------------------------------- diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 6a2e511edb85..4f441f1b750d 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -668,25 +668,27 @@ fail: static struct iommu_ops msm_iommu_ops = { .domain_alloc = msm_iommu_domain_alloc, - .domain_free = msm_iommu_domain_free, - .attach_dev = msm_iommu_attach_dev, - .detach_dev = msm_iommu_detach_dev, - .map = msm_iommu_map, - .unmap = msm_iommu_unmap, - /* - * Nothing is needed here, the barrier to guarantee - * completion of the tlb sync operation is implicitly - * taken care when the iommu client does a writel before - * kick starting the other master. 
- */ - .iotlb_sync = NULL, - .iotlb_sync_map = msm_iommu_sync_map, - .iova_to_phys = msm_iommu_iova_to_phys, .probe_device = msm_iommu_probe_device, .release_device = msm_iommu_release_device, .device_group = generic_device_group, .pgsize_bitmap = MSM_IOMMU_PGSIZES, .of_xlate = qcom_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = msm_iommu_attach_dev, + .detach_dev = msm_iommu_detach_dev, + .map = msm_iommu_map, + .unmap = msm_iommu_unmap, + /* + * Nothing is needed here, the barrier to guarantee + * completion of the tlb sync operation is implicitly + * taken care when the iommu client does a writel before + * kick starting the other master. + */ + .iotlb_sync = NULL, + .iotlb_sync_map = msm_iommu_sync_map, + .iova_to_phys = msm_iommu_iova_to_phys, + .free = msm_iommu_domain_free, + } }; static int msm_iommu_probe(struct platform_device *pdev) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 25b834104790..884c288a8f63 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -658,15 +658,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev, static const struct iommu_ops mtk_iommu_ops = { .domain_alloc = mtk_iommu_domain_alloc, - .domain_free = mtk_iommu_domain_free, - .attach_dev = mtk_iommu_attach_device, - .detach_dev = mtk_iommu_detach_device, - .map = mtk_iommu_map, - .unmap = mtk_iommu_unmap, - .flush_iotlb_all = mtk_iommu_flush_iotlb_all, - .iotlb_sync = mtk_iommu_iotlb_sync, - .iotlb_sync_map = mtk_iommu_sync_map, - .iova_to_phys = mtk_iommu_iova_to_phys, .probe_device = mtk_iommu_probe_device, .release_device = mtk_iommu_release_device, .device_group = mtk_iommu_device_group, @@ -675,6 +666,17 @@ static const struct iommu_ops mtk_iommu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = mtk_iommu_attach_device, + .detach_dev = mtk_iommu_detach_device, + .map = mtk_iommu_map, + .unmap = mtk_iommu_unmap, + .flush_iotlb_all = mtk_iommu_flush_iotlb_all, + .iotlb_sync = mtk_iommu_iotlb_sync, + .iotlb_sync_map = mtk_iommu_sync_map, + .iova_to_phys = mtk_iommu_iova_to_phys, + .free = mtk_iommu_domain_free, + } }; static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index be22fcf988ce..293ef7e1a861 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -514,12 +514,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) static const struct iommu_ops mtk_iommu_ops = { .domain_alloc = mtk_iommu_domain_alloc, - .domain_free = mtk_iommu_domain_free, - .attach_dev = mtk_iommu_attach_device, - .detach_dev = mtk_iommu_detach_device, - .map = mtk_iommu_map, - .unmap = mtk_iommu_unmap, - .iova_to_phys = mtk_iommu_iova_to_phys, .probe_device = mtk_iommu_probe_device, .probe_finalize = mtk_iommu_probe_finalize, .release_device = mtk_iommu_release_device, @@ -527,6 +521,14 @@ static const struct iommu_ops mtk_iommu_ops = { .device_group = generic_device_group, .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = mtk_iommu_attach_device, + .detach_dev = mtk_iommu_detach_device, + .map = mtk_iommu_map, + .unmap = mtk_iommu_unmap, + .iova_to_phys = mtk_iommu_iova_to_phys, + .free = mtk_iommu_domain_free, + } }; static const struct of_device_id 
mtk_iommu_of_ids[] = { diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 980e4af3f06b..4aab631ef517 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev) static const struct iommu_ops omap_iommu_ops = { .domain_alloc = omap_iommu_domain_alloc, - .domain_free = omap_iommu_domain_free, - .attach_dev = omap_iommu_attach_dev, - .detach_dev = omap_iommu_detach_dev, - .map = omap_iommu_map, - .unmap = omap_iommu_unmap, - .iova_to_phys = omap_iommu_iova_to_phys, .probe_device = omap_iommu_probe_device, .release_device = omap_iommu_release_device, .device_group = omap_iommu_device_group, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = omap_iommu_attach_dev, + .detach_dev = omap_iommu_detach_dev, + .map = omap_iommu_map, + .unmap = omap_iommu_unmap, + .iova_to_phys = omap_iommu_iova_to_phys, + .free = omap_iommu_domain_free, + } }; static int __init omap_iommu_init(void) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 7f23ad61c094..2ae97a84d41b 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev, static const struct iommu_ops rk_iommu_ops = { .domain_alloc = rk_iommu_domain_alloc, - .domain_free = rk_iommu_domain_free, - .attach_dev = rk_iommu_attach_device, - .detach_dev = rk_iommu_detach_device, - .map = rk_iommu_map, - .unmap = rk_iommu_unmap, .probe_device = rk_iommu_probe_device, .release_device = rk_iommu_release_device, - .iova_to_phys = rk_iommu_iova_to_phys, .device_group = rk_iommu_device_group, .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, .of_xlate = rk_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = rk_iommu_attach_device, + .detach_dev = rk_iommu_detach_device, + .map = rk_iommu_map, + .unmap = rk_iommu_unmap, + .iova_to_phys = rk_iommu_iova_to_phys, + .free = rk_iommu_domain_free, + } }; static int rk_iommu_probe(struct platform_device *pdev) diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 50860ebdd087..3833e86c6e7b 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev) static const struct iommu_ops s390_iommu_ops = { .capable = s390_iommu_capable, .domain_alloc = s390_domain_alloc, - .domain_free = s390_domain_free, - .attach_dev = s390_iommu_attach_device, - .detach_dev = s390_iommu_detach_device, - .map = s390_iommu_map, - .unmap = s390_iommu_unmap, - .iova_to_phys = s390_iommu_iova_to_phys, .probe_device = s390_iommu_probe_device, .release_device = s390_iommu_release_device, .device_group = generic_device_group, .pgsize_bitmap = S390_IOMMU_PGSIZES, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = s390_iommu_attach_device, + .detach_dev = s390_iommu_detach_device, + .map = s390_iommu_map, + .unmap = s390_iommu_unmap, + .iova_to_phys = s390_iommu_iova_to_phys, + .free = s390_domain_free, + } }; static int __init s390_iommu_init(void) diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c index 27ac818b0354..bd409bab6286 100644 --- a/drivers/iommu/sprd-iommu.c +++ b/drivers/iommu/sprd-iommu.c @@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) static const struct iommu_ops sprd_iommu_ops = { .domain_alloc = 
sprd_iommu_domain_alloc, - .domain_free = sprd_iommu_domain_free, - .attach_dev = sprd_iommu_attach_device, - .detach_dev = sprd_iommu_detach_device, - .map = sprd_iommu_map, - .unmap = sprd_iommu_unmap, - .iotlb_sync_map = sprd_iommu_sync_map, - .iotlb_sync = sprd_iommu_sync, - .iova_to_phys = sprd_iommu_iova_to_phys, .probe_device = sprd_iommu_probe_device, .release_device = sprd_iommu_release_device, .device_group = sprd_iommu_device_group, .of_xlate = sprd_iommu_of_xlate, .pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sprd_iommu_attach_device, + .detach_dev = sprd_iommu_detach_device, + .map = sprd_iommu_map, + .unmap = sprd_iommu_unmap, + .iotlb_sync_map = sprd_iommu_sync_map, + .iotlb_sync = sprd_iommu_sync, + .iova_to_phys = sprd_iommu_iova_to_phys, + .free = sprd_iommu_domain_free, + } }; static const struct of_device_id sprd_iommu_of_match[] = { diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index 92997021e188..c54ab477b8fd 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev, static const struct iommu_ops sun50i_iommu_ops = { .pgsize_bitmap = SZ_4K, - .attach_dev = sun50i_iommu_attach_device, - .detach_dev = sun50i_iommu_detach_device, .device_group = sun50i_iommu_device_group, .domain_alloc = sun50i_iommu_domain_alloc, - .domain_free = sun50i_iommu_domain_free, - .flush_iotlb_all = sun50i_iommu_flush_iotlb_all, - .iotlb_sync = sun50i_iommu_iotlb_sync, - .iova_to_phys = sun50i_iommu_iova_to_phys, - .map = sun50i_iommu_map, .of_xlate = sun50i_iommu_of_xlate, .probe_device = sun50i_iommu_probe_device, .release_device = sun50i_iommu_release_device, - .unmap = sun50i_iommu_unmap, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sun50i_iommu_attach_device, + .detach_dev = sun50i_iommu_detach_device, + .flush_iotlb_all = sun50i_iommu_flush_iotlb_all, + .iotlb_sync = sun50i_iommu_iotlb_sync, + .iova_to_phys = sun50i_iommu_iova_to_phys, + .map = sun50i_iommu_map, + .unmap = sun50i_iommu_unmap, + .free = sun50i_iommu_domain_free, + } }; static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu, diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index bbd287d19324..a6700a40a6f8 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -272,19 +272,21 @@ static void gart_iommu_sync(struct iommu_domain *domain, static const struct iommu_ops gart_iommu_ops = { .domain_alloc = gart_iommu_domain_alloc, - .domain_free = gart_iommu_domain_free, - .attach_dev = gart_iommu_attach_dev, - .detach_dev = gart_iommu_detach_dev, .probe_device = gart_iommu_probe_device, .release_device = gart_iommu_release_device, .device_group = generic_device_group, - .map = gart_iommu_map, - .unmap = gart_iommu_unmap, - .iova_to_phys = gart_iommu_iova_to_phys, .pgsize_bitmap = GART_IOMMU_PGSIZES, .of_xlate = gart_iommu_of_xlate, - .iotlb_sync_map = gart_iommu_sync_map, - .iotlb_sync = gart_iommu_sync, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = gart_iommu_attach_dev, + .detach_dev = gart_iommu_detach_dev, + .map = gart_iommu_map, + .unmap = gart_iommu_unmap, + .iova_to_phys = gart_iommu_iova_to_phys, + .iotlb_sync_map = gart_iommu_sync_map, + .iotlb_sync = gart_iommu_sync, + .free = gart_iommu_domain_free, + } }; int tegra_gart_suspend(struct gart_device *gart) diff --git a/drivers/iommu/tegra-smmu.c 
b/drivers/iommu/tegra-smmu.c index 43df44f918a1..44c3716e16ee 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -963,17 +963,19 @@ static int tegra_smmu_of_xlate(struct device *dev, static const struct iommu_ops tegra_smmu_ops = { .domain_alloc = tegra_smmu_domain_alloc, - .domain_free = tegra_smmu_domain_free, - .attach_dev = tegra_smmu_attach_dev, - .detach_dev = tegra_smmu_detach_dev, .probe_device = tegra_smmu_probe_device, .release_device = tegra_smmu_release_device, .device_group = tegra_smmu_device_group, - .map = tegra_smmu_map, - .unmap = tegra_smmu_unmap, - .iova_to_phys = tegra_smmu_iova_to_phys, .of_xlate = tegra_smmu_of_xlate, .pgsize_bitmap = SZ_4K, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = tegra_smmu_attach_dev, + .detach_dev = tegra_smmu_detach_dev, + .map = tegra_smmu_map, + .unmap = tegra_smmu_unmap, + .iova_to_phys = tegra_smmu_iova_to_phys, + .free = tegra_smmu_domain_free, + } }; static void tegra_smmu_ahb_enable(void) diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index f2aa34f57454..25be4b822aa0 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args) static struct iommu_ops viommu_ops = { .domain_alloc = viommu_domain_alloc, - .domain_free = viommu_domain_free, - .attach_dev = viommu_attach_dev, - .map = viommu_map, - .unmap = viommu_unmap, - .iova_to_phys = viommu_iova_to_phys, - .iotlb_sync = viommu_iotlb_sync, .probe_device = viommu_probe_device, .probe_finalize = viommu_probe_finalize, .release_device = viommu_release_device, @@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .of_xlate = viommu_of_xlate, .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = viommu_attach_dev, + .map = viommu_map, + .unmap = viommu_unmap, + .iova_to_phys = viommu_iova_to_phys, + .iotlb_sync = viommu_iotlb_sync, + .free = viommu_domain_free, + } }; static int viommu_init_vqs(struct viommu_dev *viommu) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 1ded6076a75c..9208eca4b0d1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -37,6 +37,7 @@ struct iommu_group; struct bus_type; struct device; struct iommu_domain; +struct iommu_domain_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -88,7 +89,7 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; - const struct iommu_ops *ops; + const struct iommu_domain_ops *ops; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; @@ -192,26 +193,11 @@ struct iommu_iotlb_gather { * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @domain_alloc: allocate iommu domain - * @domain_free: free iommu domain - * @attach_dev: attach device to an iommu domain - * @detach_dev: detach device from an iommu domain - * @map: map a physically contiguous memory region to an iommu domain - * @map_pages: map a physically contiguous set of pages of the same size to - * an iommu domain. 
- * @unmap: unmap a physically contiguous memory region from an iommu domain - * @unmap_pages: unmap a number of pages of the same size from an iommu domain - * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain - * @iotlb_sync_map: Sync mappings created recently using @map to the hardware - * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush - * queue - * @iova_to_phys: translate iova to physical address * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain * @device_group: find iommu group for a particular device - * @enable_nesting: Enable nesting - * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device * @of_xlate: add OF master IDs to iommu grouping @@ -228,6 +214,7 @@ struct iommu_iotlb_gather { * - IOMMU_DOMAIN_IDENTITY: must use an identity domain * - IOMMU_DOMAIN_DMA: must use a dma domain * - 0: use the default setting + * @default_domain_ops: the default ops for domains * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops */ @@ -236,33 +223,11 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); - void (*domain_free)(struct iommu_domain *); - int (*attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp); - int (*map_pages)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t pgsize, size_t pgcount, - int prot, gfp_t gfp, size_t *mapped); - size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *iotlb_gather); - size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, - size_t pgsize, size_t pgcount, - struct iommu_iotlb_gather *iotlb_gather); - void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, - size_t size); - void (*iotlb_sync)(struct iommu_domain *domain, - struct iommu_iotlb_gather *iotlb_gather); - phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*enable_nesting)(struct iommu_domain *domain); - int (*set_pgtable_quirks)(struct iommu_domain *domain, - unsigned long quirks); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -288,10 +253,60 @@ struct iommu_ops { int (*def_domain_type)(struct device *dev); + const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; }; +/** + * struct iommu_domain_ops - domain specific operations + * @attach_dev: attach an iommu domain to a device + * @detach_dev: detach an iommu domain from a device + * @map: map a physically contiguous memory region to an iommu domain + * @map_pages: map a physically contiguous set of pages of the 
same size to + * an iommu domain. + * @unmap: unmap a physically contiguous memory region from an iommu domain + * @unmap_pages: unmap a number of pages of the same size from an iommu domain + * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain + * @iotlb_sync_map: Sync mappings created recently using @map to the hardware + * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush + * queue + * @iova_to_phys: translate iova to physical address + * @enable_nesting: Enable nesting + * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) + * @free: Release the domain after use. + */ +struct iommu_domain_ops { + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); + size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size, struct iommu_iotlb_gather *iotlb_gather); + size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather); + + void (*flush_iotlb_all)(struct iommu_domain *domain); + void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, + size_t size); + void (*iotlb_sync)(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather); + + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, + dma_addr_t iova); + + int (*enable_nesting)(struct iommu_domain *domain); + int (*set_pgtable_quirks)(struct iommu_domain *domain, + unsigned long quirks); + + void (*free)(struct iommu_domain *domain); +}; + /** * struct iommu_device - IOMMU core representation of one IOMMU hardware * instance -- cgit v1.2.3-59-g8ed1b From 06687a03805e29dcf068a8e6436ed2a2bbd8b9e9 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Tue, 1 Mar 2022 14:26:22 +0530 Subject: iommu/amd: Improve error handling for amd_iommu_init_pci Add error messages to prevent silent failure. Signed-off-by: Vasant Hegde Signed-off-by: Suravee Suthikulpanit Link: https://lore.kernel.org/r/20220301085626.87680-2-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/init.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index b10fb52ea442..6b5af568f3d5 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1943,9 +1943,11 @@ static int __init amd_iommu_init_pci(void) for_each_iommu(iommu) { ret = iommu_init_pci(iommu); - if (ret) - break; - + if (ret) { + pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n", + iommu->index, ret); + goto out; + } /* Need to setup range after PCI init */ iommu_set_cwwb_range(iommu); } @@ -1961,6 +1963,11 @@ static int __init amd_iommu_init_pci(void) * active. 
 */
 	ret = amd_iommu_init_api();
+	if (ret) {
+		pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
+		       ret);
+		goto out;
+	}
 
 	init_device_table_dma();
 
@@ -1970,6 +1977,7 @@ static int __init amd_iommu_init_pci(void)
 	if (!ret)
 		print_iommu_info();
 
+out:
 	return ret;
 }
-- 
cgit v1.2.3-59-g8ed1b


From 5b61343b50590fb04a3f6be2cdc4868091757262 Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Thu, 3 Mar 2022 14:40:08 +0000
Subject: iommu/iova: Improve 32-bit free space estimate

For various reasons based on the allocator behaviour and typical
use-cases at the time, when the max32_alloc_size optimisation was
introduced it seemed reasonable to couple the reset of the tracked
size to the update of cached32_node upon freeing a relevant IOVA.
However, since subsequent optimisations focused on helping genuine
32-bit devices make best use of even more limited address spaces, it
is now a lot more likely for cached32_node to be anywhere in a "full"
32-bit address space, and as such more likely for space to become
available from IOVAs below that node being freed.

At this point, the short-cut in __cached_rbnode_delete_update() really
doesn't hold up any more, and we need to fix the logic to reliably
provide the expected behaviour. We still want cached32_node to only
move upwards, but we should reset the allocation size if *any* 32-bit
space has become available.

Reported-by: Yunfei Wang
Signed-off-by: Robin Murphy
Reviewed-by: Miles Chen
Link: https://lore.kernel.org/r/033815732d83ca73b13c11485ac39336f15c3b40.1646318408.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/iova.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 7e9c3a97c040..db77aa675145 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -94,10 +94,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	cached_iova = to_iova(iovad->cached32_node);
 	if (free == cached_iova ||
 	    (free->pfn_hi < iovad->dma_32bit_pfn &&
-	     free->pfn_lo >= cached_iova->pfn_lo)) {
+	     free->pfn_lo >= cached_iova->pfn_lo))
 		iovad->cached32_node = rb_next(&free->node);
+
+	if (free->pfn_lo < iovad->dma_32bit_pfn)
 		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
-	}
 
 	cached_iova = to_iova(iovad->cached_node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
-- 
cgit v1.2.3-59-g8ed1b


From 17224e08af73fbbc5334226ae75feecb1e037cae Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Wed, 8 Dec 2021 14:07:40 +0200
Subject: iommu/mediatek: Remove for_each_m4u in tlb_sync_all

The tlb_sync_all is called from these three places:
a) flush_iotlb_all: it is already called once for each IOMMU HW.
b) tlb_flush_range_sync: it already iterates with for_each_m4u.
c) the IRQ handler: on an IOMMU HW translation fault, only the
   faulting instance needs to be flushed.

Thus, there is no need for for_each_m4u inside tlb_sync_all. Remove it.
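For reference, the iteration being removed is just a walk over the
driver's global list of registered M4U instances; a minimal sketch of
the pattern (the list and member names here are assumptions for
illustration, not verbatim driver code):

	/* sketch: per-driver bookkeeping of every registered M4U */
	static LIST_HEAD(m4ulist);

	#define for_each_m4u(data) \
		list_for_each_entry(data, &m4ulist, list)

Dropping it from tlb_sync_all therefore means the function only touches
the single instance it was handed, which is what all three callers
above need.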
Signed-off-by: Yong Wu
Reviewed-by: Dafna Hirschfeld
Reviewed-by: AngeloGioacchino Del Regno
Link: https://lore.kernel.org/r/20211208120744.2415-2-dafna.hirschfeld@collabora.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/mtk_iommu.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 25b834104790..1356137f91f9 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -210,17 +210,15 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
 
 static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
 {
-	for_each_m4u(data) {
-		if (pm_runtime_get_if_in_use(data->dev) <= 0)
-			continue;
+	if (pm_runtime_get_if_in_use(data->dev) <= 0)
+		return;
 
-		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
-			       data->base + data->plat_data->inv_sel_reg);
-		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
-		wmb(); /* Make sure the tlb flush all done */
+	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+		       data->base + data->plat_data->inv_sel_reg);
+	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+	wmb(); /* Make sure the tlb flush all done */
 
-		pm_runtime_put(data->dev);
-	}
+	pm_runtime_put(data->dev);
 }
 
 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
-- 
cgit v1.2.3-59-g8ed1b


From 4ea794452ae7220bb02ad950d77d86adcbb86b10 Mon Sep 17 00:00:00 2001
From: Sebastian Reichel
Date: Wed, 8 Dec 2021 14:07:41 +0200
Subject: iommu/mediatek: Always check runtime PM status in tlb flush range
 callback

In case of v4l2_reqbufs() it is possible that a TLB flush is done
without runtime PM being enabled. In that case the "Partial TLB flush
timed out, falling back to full flush" warning is printed.

Commit c0b57581b73b ("iommu/mediatek: Add power-domain operation")
introduced has_pm as an optimization to avoid checking runtime PM when
there is no power domain attached. But without the PM domain there is
still the device driver's runtime PM suspend handler, which disables
the clock. Thus flushing should also be avoided when there is no PM
domain involved.
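The resulting gating pattern is sketched below; per the runtime PM API,
pm_runtime_get_if_in_use() returns 1 when it has taken a usage-count
reference on an active device, 0 when the device is not in use, and a
negative value when runtime PM is disabled, so anything <= 0 means the
hardware may be powered down and the MMIO flush must be skipped:

	for_each_m4u(data) {
		if (pm_runtime_get_if_in_use(data->dev) <= 0)
			continue;	/* HW may be suspended; skip the MMIO flush */

		/* ... program the range invalidation and poll for completion ... */

		pm_runtime_put(data->dev);	/* drop the reference taken above */
	}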
Signed-off-by: Sebastian Reichel
Reviewed-by: Dafna Hirschfeld
Reviewed-by: AngeloGioacchino Del Regno
Reviewed-by: Yong Wu
Link: https://lore.kernel.org/r/20211208120744.2415-3-dafna.hirschfeld@collabora.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/mtk_iommu.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 1356137f91f9..bcf1f5e11fbd 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -225,16 +225,13 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
 					   size_t granule,
 					   struct mtk_iommu_data *data)
 {
-	bool has_pm = !!data->dev->pm_domain;
 	unsigned long flags;
 	int ret;
 	u32 tmp;
 
 	for_each_m4u(data) {
-		if (has_pm) {
-			if (pm_runtime_get_if_in_use(data->dev) <= 0)
-				continue;
-		}
+		if (pm_runtime_get_if_in_use(data->dev) <= 0)
+			continue;
 
 		spin_lock_irqsave(&data->tlb_lock, flags);
 		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
@@ -259,8 +256,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
 		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
 		spin_unlock_irqrestore(&data->tlb_lock, flags);
 
-		if (has_pm)
-			pm_runtime_put(data->dev);
+		pm_runtime_put(data->dev);
 	}
 }
-- 
cgit v1.2.3-59-g8ed1b


From ad5042ecbe944bd34d0bdd23f3b728a7e92c0e12 Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Wed, 8 Dec 2021 14:07:42 +0200
Subject: iommu/mediatek: Remove the power status checking in tlb flush all

To simplify the code, remove the power status checking in
tlb_flush_all, i.e. remove this:

	if (pm_runtime_get_if_in_use(data->dev) <= 0)
		continue;

The mtk_iommu_tlb_flush_all is called from
a) the ISR,
b) the tlb flush range fail case, and
c) iommu_create_device_direct_mappings.

In the first two cases, the power and clock are always enabled. In the
third case the tlb flush is unnecessary because a later patch in the
series adds a full flush in the pm_runtime_resume callback.

In addition, writing the tlb control register when the iommu is not
resumed is ok; the write is simply ignored.

Signed-off-by: Yong Wu
[refactor commit log]
Signed-off-by: Dafna Hirschfeld
Reviewed-by: AngeloGioacchino Del Regno
Link: https://lore.kernel.org/r/20211208120744.2415-4-dafna.hirschfeld@collabora.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/mtk_iommu.c | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bcf1f5e11fbd..7cdf2819f4b7 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -210,15 +210,10 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
 
 static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
 {
-	if (pm_runtime_get_if_in_use(data->dev) <= 0)
-		return;
-
 	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
 		       data->base + data->plat_data->inv_sel_reg);
 	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
 	wmb(); /* Make sure the tlb flush all done */
-
-	pm_runtime_put(data->dev);
 }
 
 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
-- 
cgit v1.2.3-59-g8ed1b


From 15672b6dc5d03a947e9180db1d6564f43b0c6799 Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Wed, 8 Dec 2021 14:07:43 +0200
Subject: iommu/mediatek: Add tlb_lock in tlb_flush_all

The tlb_flush_all touches the registers controlling tlb operations.
Protect it with the tlb_lock spinlock. This also requires the
range_sync function to release that spinlock before calling
tlb_flush_all.
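The ordering that matters here, sketched below: tlb_lock is a regular,
non-recursive spinlock, so it must be dropped before calling a helper
that now takes it itself (the timed_out flag is a placeholder for the
poll result):

	spin_lock_irqsave(&data->tlb_lock, flags);
	/* ... program the range invalidate, poll and clear REG_MMU_CPE_DONE ... */
	spin_unlock_irqrestore(&data->tlb_lock, flags);

	if (timed_out)
		mtk_iommu_tlb_flush_all(data);	/* takes tlb_lock itself */

Calling tlb_flush_all with tlb_lock still held would self-deadlock,
which is why the timeout fallback moves outside the critical section.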
Signed-off-by: Yong Wu
[refactor commit log]
Signed-off-by: Dafna Hirschfeld
Reviewed-by: AngeloGioacchino Del Regno
Link: https://lore.kernel.org/r/20211208120744.2415-5-dafna.hirschfeld@collabora.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/mtk_iommu.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 7cdf2819f4b7..7cbfe3bfaaab 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -210,10 +210,14 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
 
 static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->tlb_lock, flags);
 	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
 		       data->base + data->plat_data->inv_sel_reg);
 	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
 	wmb(); /* Make sure the tlb flush all done */
+	spin_unlock_irqrestore(&data->tlb_lock, flags);
 }
 
 static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
@@ -242,14 +246,16 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
 		/* tlb sync */
 		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
 						tmp, tmp != 0, 10, 1000);
+
+		/* Clear the CPE status */
+		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+		spin_unlock_irqrestore(&data->tlb_lock, flags);
+
 		if (ret) {
 			dev_warn(data->dev,
 				 "Partial TLB flush timed out, falling back to full flush\n");
 			mtk_iommu_tlb_flush_all(data);
 		}
-		/* Clear the CPE status */
-		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
-		spin_unlock_irqrestore(&data->tlb_lock, flags);
 
 		pm_runtime_put(data->dev);
 	}
-- 
cgit v1.2.3-59-g8ed1b


From 4f23f6d45821701c68b99c2847a075472b53198c Mon Sep 17 00:00:00 2001
From: Yong Wu
Date: Wed, 8 Dec 2021 14:07:44 +0200
Subject: iommu/mediatek: Always tlb_flush_all when each PM resume

Prepare for two HWs that share a pgtable but sit in different power
domains.

When there are two M4U HWs, there can be a problem in flush_range: we
get the PM status via the M4U device, but that does not reflect the
real power-domain status of the HW, since other HWs may also be using
that power domain.

DMA allocation is often done while the allocating device is runtime
suspended. In such a case the iommu will also be suspended and partial
flushing of the tlb will not be executed. Therefore, we add a
tlb_flush_all in the pm_runtime_resume to make sure the tlb is always
clean. In the other cases, the iommu's power should be active via the
device link with smi.

Signed-off-by: Yong Wu
[move the call to mtk_iommu_tlb_flush_all to the bottom of resume cb,
 improve doc/log]
Signed-off-by: Dafna Hirschfeld
Reviewed-by: AngeloGioacchino Del Regno
Link: https://lore.kernel.org/r/20211208120744.2415-6-dafna.hirschfeld@collabora.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/mtk_iommu.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 7cbfe3bfaaab..eb43e7edb9dc 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -975,6 +975,13 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
 	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
 	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
 	writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
+
+	/*
+	 * Users may allocate dma buffer before they call pm_runtime_get,
+	 * in which case it will lack the necessary tlb flush.
+ * Thus, make sure to update the tlb after each PM resume. + */ + mtk_iommu_tlb_flush_all(data); return 0; } -- cgit v1.2.3-59-g8ed1b From 402e6688a7df482cc57be0b0dd5b443d995073fb Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:48 +0800 Subject: iommu/vt-d: Remove intel_iommu::domains The "domains" field of the intel_iommu structure keeps the mapping of domain_id to dmar_domain. This information is not used anywhere. Remove and cleanup it to avoid unnecessary memory consumption. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-2-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 68 ++------------------------------------------- include/linux/intel-iommu.h | 1 - 2 files changed, 3 insertions(+), 66 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b549172e88ef..e3b04d5d87b0 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -455,36 +455,6 @@ __setup("intel_iommu=", intel_iommu_setup); static struct kmem_cache *iommu_domain_cache; static struct kmem_cache *iommu_devinfo_cache; -static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) -{ - struct dmar_domain **domains; - int idx = did >> 8; - - domains = iommu->domains[idx]; - if (!domains) - return NULL; - - return domains[did & 0xff]; -} - -static void set_iommu_domain(struct intel_iommu *iommu, u16 did, - struct dmar_domain *domain) -{ - struct dmar_domain **domains; - int idx = did >> 8; - - if (!iommu->domains[idx]) { - size_t size = 256 * sizeof(struct dmar_domain *); - iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); - } - - domains = iommu->domains[idx]; - if (WARN_ON(!domains)) - return; - else - domains[did & 0xff] = domain; -} - void *alloc_pgtable_page(int node) { struct page *page; @@ -1751,8 +1721,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) DMA_TLB_DSI_FLUSH); if (!cap_caching_mode(iommu->cap)) - iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), - 0, MAX_AGAW_PFN_WIDTH); + iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH); } } @@ -1815,7 +1784,6 @@ static void iommu_disable_translation(struct intel_iommu *iommu) static int iommu_init_domains(struct intel_iommu *iommu) { u32 ndomains; - size_t size; ndomains = cap_ndoms(iommu->cap); pr_debug("%s: Number of Domains supported <%d>\n", @@ -1827,24 +1795,6 @@ static int iommu_init_domains(struct intel_iommu *iommu) if (!iommu->domain_ids) return -ENOMEM; - size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **); - iommu->domains = kzalloc(size, GFP_KERNEL); - - if (iommu->domains) { - size = 256 * sizeof(struct dmar_domain *); - iommu->domains[0] = kzalloc(size, GFP_KERNEL); - } - - if (!iommu->domains || !iommu->domains[0]) { - pr_err("%s: Allocating domain array failed\n", - iommu->name); - bitmap_free(iommu->domain_ids); - kfree(iommu->domains); - iommu->domain_ids = NULL; - iommu->domains = NULL; - return -ENOMEM; - } - /* * If Caching mode is set, then invalid translations are tagged * with domain-id 0, hence we need to pre-allocate it. 
We also @@ -1871,7 +1821,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) struct device_domain_info *info, *tmp; unsigned long flags; - if (!iommu->domains || !iommu->domain_ids) + if (!iommu->domain_ids) return; spin_lock_irqsave(&device_domain_lock, flags); @@ -1892,15 +1842,8 @@ static void disable_dmar_iommu(struct intel_iommu *iommu) static void free_dmar_iommu(struct intel_iommu *iommu) { - if ((iommu->domains) && (iommu->domain_ids)) { - int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; - int i; - - for (i = 0; i < elems; i++) - kfree(iommu->domains[i]); - kfree(iommu->domains); + if (iommu->domain_ids) { bitmap_free(iommu->domain_ids); - iommu->domains = NULL; iommu->domain_ids = NULL; } @@ -1978,11 +1921,8 @@ static int domain_attach_iommu(struct dmar_domain *domain, } set_bit(num, iommu->domain_ids); - set_iommu_domain(iommu, num, domain); - domain->iommu_did[iommu->seq_id] = num; domain->nid = iommu->node; - domain_update_iommu_cap(domain); } @@ -2001,8 +1941,6 @@ static void domain_detach_iommu(struct dmar_domain *domain, if (domain->iommu_refcnt[iommu->seq_id] == 0) { num = domain->iommu_did[iommu->seq_id]; clear_bit(num, iommu->domain_ids); - set_iommu_domain(iommu, num, NULL); - domain_update_iommu_cap(domain); domain->iommu_did[iommu->seq_id] = 0; } diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 5cfda90b2cca..8c7591b5f3e2 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -578,7 +578,6 @@ struct intel_iommu { #ifdef CONFIG_INTEL_IOMMU unsigned long *domain_ids; /* bitmap of domains */ - struct dmar_domain ***domains; /* ptr to domains */ spinlock_t lock; /* protect context, domain ids */ struct root_entry *root_entry; /* virtual address */ -- cgit v1.2.3-59-g8ed1b From c5d27545fb2f3810c1ee192f4f3e0822172fa474 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:49 +0800 Subject: iommu/vt-d: Remove finding domain in dmar_insert_one_dev_info() The Intel IOMMU driver has already converted to use default domain framework in iommu core. There's no need to find a domain for the device in the domain attaching path. Cleanup that code. 
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index e3b04d5d87b0..b3075933864e 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2554,7 +2554,6 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, struct device *dev, struct dmar_domain *domain) { - struct dmar_domain *found = NULL; struct device_domain_info *info; unsigned long flags; int ret; @@ -2605,26 +2604,6 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, } spin_lock_irqsave(&device_domain_lock, flags); - if (dev) - found = find_domain(dev); - - if (!found) { - struct device_domain_info *info2; - info2 = dmar_search_domain_by_dev_info(info->segment, info->bus, - info->devfn); - if (info2) { - found = info2->domain; - info2->dev = dev; - } - } - - if (found) { - spin_unlock_irqrestore(&device_domain_lock, flags); - free_devinfo_mem(info); - /* Caller must free the original domain */ - return found; - } - spin_lock(&iommu->lock); ret = domain_attach_iommu(domain, iommu); spin_unlock(&iommu->lock); -- cgit v1.2.3-59-g8ed1b From c8850a6e6d71e2f95cfe1eae8c8dd195dd05b950 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:50 +0800 Subject: iommu/vt-d: Remove iova_cache_get/put() These have been done in drivers/iommu/dma-iommu.c. Remove this duplicate code. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-4-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b3075933864e..7c2b427bea3b 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3381,9 +3381,6 @@ static inline int iommu_devinfo_cache_init(void) static int __init iommu_init_mempool(void) { int ret; - ret = iova_cache_get(); - if (ret) - return ret; ret = iommu_domain_cache_init(); if (ret) @@ -3395,7 +3392,6 @@ static int __init iommu_init_mempool(void) kmem_cache_destroy(iommu_domain_cache); domain_error: - iova_cache_put(); return -ENOMEM; } @@ -3404,7 +3400,6 @@ static void __init iommu_exit_mempool(void) { kmem_cache_destroy(iommu_devinfo_cache); kmem_cache_destroy(iommu_domain_cache); - iova_cache_put(); } static void __init init_no_remapping_devices(void) -- cgit v1.2.3-59-g8ed1b From ee2653bbe89df777e222ec1aa015c2ed7ea06b6b Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:51 +0800 Subject: iommu/vt-d: Remove domain and devinfo mempool The domain and devinfo memory blocks are only allocated during device probe and released during remove. There's no hot-path context, hence no need for memory pools. 
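The tradeoff, in a hedged sketch: a dedicated slab cache only pays off
for frequent, hot-path allocations of one fixed size; for objects
allocated at probe time and freed at remove time, plain
kzalloc()/kfree() is simpler and effectively as fast:

	/* before: a dedicated cache for one structure size */
	cache = kmem_cache_create("iommu_domain", sizeof(struct dmar_domain),
				  0, SLAB_HWCACHE_ALIGN, NULL);
	domain = kmem_cache_alloc(cache, GFP_ATOMIC);
	/* ... use the object ... */
	kmem_cache_free(cache, domain);

	/* after: plain allocation, fine for probe/remove-time objects */
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	/* ... use the object ... */
	kfree(domain);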
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-5-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 104 +++----------------------------------------- 1 file changed, 5 insertions(+), 99 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 7c2b427bea3b..51db26d8606c 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -452,9 +452,6 @@ static int __init intel_iommu_setup(char *str) } __setup("intel_iommu=", intel_iommu_setup); -static struct kmem_cache *iommu_domain_cache; -static struct kmem_cache *iommu_devinfo_cache; - void *alloc_pgtable_page(int node) { struct page *page; @@ -471,26 +468,6 @@ void free_pgtable_page(void *vaddr) free_page((unsigned long)vaddr); } -static inline void *alloc_domain_mem(void) -{ - return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC); -} - -static void free_domain_mem(void *vaddr) -{ - kmem_cache_free(iommu_domain_cache, vaddr); -} - -static inline void * alloc_devinfo_mem(void) -{ - return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC); -} - -static inline void free_devinfo_mem(void *vaddr) -{ - kmem_cache_free(iommu_devinfo_cache, vaddr); -} - static inline int domain_type_is_si(struct dmar_domain *domain) { return domain->domain.type == IOMMU_DOMAIN_IDENTITY; @@ -1885,11 +1862,10 @@ static struct dmar_domain *alloc_domain(unsigned int type) { struct dmar_domain *domain; - domain = alloc_domain_mem(); + domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; - memset(domain, 0, sizeof(*domain)); domain->nid = NUMA_NO_NODE; if (first_level_by_default(type)) domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; @@ -1973,7 +1949,7 @@ static void domain_exit(struct dmar_domain *domain) put_pages_list(&freelist); } - free_domain_mem(domain); + kfree(domain); } /* @@ -2558,7 +2534,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, unsigned long flags; int ret; - info = alloc_devinfo_mem(); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return NULL; @@ -2574,13 +2550,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, info->segment = pci_domain_nr(pdev->bus); } - info->ats_supported = info->pasid_supported = info->pri_supported = 0; - info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0; - info->ats_qdep = 0; info->dev = dev; info->domain = domain; info->iommu = iommu; - info->pasid_table = NULL; if (dev && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(info->dev); @@ -2610,7 +2582,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, if (ret) { spin_unlock_irqrestore(&device_domain_lock, flags); - free_devinfo_mem(info); + kfree(info); return NULL; } @@ -3343,65 +3315,6 @@ error: return ret; } -static inline int iommu_domain_cache_init(void) -{ - int ret = 0; - - iommu_domain_cache = kmem_cache_create("iommu_domain", - sizeof(struct dmar_domain), - 0, - SLAB_HWCACHE_ALIGN, - - NULL); - if (!iommu_domain_cache) { - pr_err("Couldn't create iommu_domain cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -static inline int iommu_devinfo_cache_init(void) -{ - int ret = 0; - - iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", - sizeof(struct device_domain_info), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!iommu_devinfo_cache) { - pr_err("Couldn't create devinfo 
cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -static int __init iommu_init_mempool(void) -{ - int ret; - - ret = iommu_domain_cache_init(); - if (ret) - goto domain_error; - - ret = iommu_devinfo_cache_init(); - if (!ret) - return ret; - - kmem_cache_destroy(iommu_domain_cache); -domain_error: - - return -ENOMEM; -} - -static void __init iommu_exit_mempool(void) -{ - kmem_cache_destroy(iommu_devinfo_cache); - kmem_cache_destroy(iommu_domain_cache); -} - static void __init init_no_remapping_devices(void) { struct dmar_drhd_unit *drhd; @@ -4253,12 +4166,6 @@ int __init intel_iommu_init(void) force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) || platform_optin_force_iommu(); - if (iommu_init_mempool()) { - if (force_on) - panic("tboot: Failed to initialize iommu memory\n"); - return -ENOMEM; - } - down_write(&dmar_global_lock); if (dmar_table_init()) { if (force_on) @@ -4379,7 +4286,6 @@ int __init intel_iommu_init(void) out_free_dmar: intel_iommu_free_dmars(); up_write(&dmar_global_lock); - iommu_exit_mempool(); return ret; } @@ -4436,7 +4342,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) domain_detach_iommu(domain, iommu); spin_unlock_irqrestore(&iommu->lock, flags); - free_devinfo_mem(info); + kfree(info); } static void dmar_remove_one_dev_info(struct device *dev) -- cgit v1.2.3-59-g8ed1b From 586081d3f6b13ec9dfdfdf3d7842a688b376fa5e Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:52 +0800 Subject: iommu/vt-d: Remove DEFER_DEVICE_DOMAIN_INFO Allocate and set the per-device iommu private data during iommu device probe. This makes the per-device iommu private data always available during iommu_probe_device() and iommu_release_device(). With this changed, the dummy DEFER_DEVICE_DOMAIN_INFO pointer could be removed. The wrappers for getting the private data and domain are also cleaned. 
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-6-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/debugfs.c | 3 +- drivers/iommu/intel/iommu.c | 186 +++++++++++++++--------------------------- drivers/iommu/intel/pasid.c | 12 +-- drivers/iommu/intel/svm.c | 6 +- include/linux/intel-iommu.h | 2 - 5 files changed, 79 insertions(+), 130 deletions(-) diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index db7a0ca73626..ed796eea4581 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -344,7 +344,8 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde, static int show_device_domain_translation(struct device *dev, void *data) { - struct dmar_domain *domain = find_domain(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct dmar_domain *domain = info->domain; struct seq_file *m = data; u64 path[6] = { 0 }; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 51db26d8606c..d9965e72d9a8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -342,21 +342,6 @@ static int iommu_skip_te_disable; int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); -#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2)) -struct device_domain_info *get_domain_info(struct device *dev) -{ - struct device_domain_info *info; - - if (!dev) - return NULL; - - info = dev_iommu_priv_get(dev); - if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO)) - return NULL; - - return info; -} - DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); @@ -741,11 +726,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, return &context[devfn]; } -static bool attach_deferred(struct device *dev) -{ - return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO; -} - /** * is_downstream_to_pci_bridge - test if a device belongs to the PCI * sub-hierarchy of a candidate PCI-PCI bridge @@ -2432,15 +2412,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH); } -static inline void unlink_domain_info(struct device_domain_info *info) -{ - assert_spin_locked(&device_domain_lock); - list_del(&info->link); - list_del(&info->global); - if (info->dev) - dev_iommu_priv_set(info->dev, NULL); -} - static void domain_remove_dev_info(struct dmar_domain *domain) { struct device_domain_info *info, *tmp; @@ -2452,24 +2423,6 @@ static void domain_remove_dev_info(struct dmar_domain *domain) spin_unlock_irqrestore(&device_domain_lock, flags); } -struct dmar_domain *find_domain(struct device *dev) -{ - struct device_domain_info *info; - - if (unlikely(!dev || !dev->iommu)) - return NULL; - - if (unlikely(attach_deferred(dev))) - return NULL; - - /* No lock here, assumes no domain exit in normal case */ - info = get_domain_info(dev); - if (likely(info)) - return info->domain; - - return NULL; -} - static inline struct device_domain_info * dmar_search_domain_by_dev_info(int segment, int bus, int devfn) { @@ -2530,66 +2483,20 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, struct device *dev, struct dmar_domain *domain) { - struct device_domain_info *info; + struct device_domain_info *info = dev_iommu_priv_get(dev); unsigned long flags; int ret; - 
info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return NULL; - - if (!dev_is_real_dma_subdevice(dev)) { - info->bus = bus; - info->devfn = devfn; - info->segment = iommu->segment; - } else { - struct pci_dev *pdev = to_pci_dev(dev); - - info->bus = pdev->bus->number; - info->devfn = pdev->devfn; - info->segment = pci_domain_nr(pdev->bus); - } - - info->dev = dev; - info->domain = domain; - info->iommu = iommu; - - if (dev && dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(info->dev); - - if (ecap_dev_iotlb_support(iommu->ecap) && - pci_ats_supported(pdev) && - dmar_find_matched_atsr_unit(pdev)) - info->ats_supported = 1; - - if (sm_supported(iommu)) { - if (pasid_supported(iommu)) { - int features = pci_pasid_features(pdev); - if (features >= 0) - info->pasid_supported = features | 1; - } - - if (info->ats_supported && ecap_prs(iommu->ecap) && - pci_pri_supported(pdev)) - info->pri_supported = 1; - } - } - spin_lock_irqsave(&device_domain_lock, flags); + info->domain = domain; spin_lock(&iommu->lock); ret = domain_attach_iommu(domain, iommu); spin_unlock(&iommu->lock); - if (ret) { spin_unlock_irqrestore(&device_domain_lock, flags); - kfree(info); return NULL; } - list_add(&info->link, &domain->devices); - list_add(&info->global, &device_domain_list); - if (dev) - dev_iommu_priv_set(dev, info); spin_unlock_irqrestore(&device_domain_lock, flags); /* PASID table is mandatory for a PCI device in scalable mode. */ @@ -4336,13 +4243,11 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) intel_pasid_free_table(info->dev); } - unlink_domain_info(info); + list_del(&info->link); spin_lock_irqsave(&iommu->lock, flags); domain_detach_iommu(domain, iommu); spin_unlock_irqrestore(&iommu->lock, flags); - - kfree(info); } static void dmar_remove_one_dev_info(struct device *dev) @@ -4351,7 +4256,7 @@ static void dmar_remove_one_dev_info(struct device *dev) unsigned long flags; spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (info) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); @@ -4475,10 +4380,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, /* normally dev is not mapped */ if (unlikely(domain_context_mapped(dev))) { - struct dmar_domain *old_domain; + struct device_domain_info *info = dev_iommu_priv_get(dev); - old_domain = find_domain(dev); - if (old_domain) + if (info->domain) dmar_remove_one_dev_info(dev); } @@ -4642,28 +4546,73 @@ static bool intel_iommu_capable(enum iommu_cap cap) static struct iommu_device *intel_iommu_probe_device(struct device *dev) { + struct pci_dev *pdev = dev_is_pci(dev) ? 
to_pci_dev(dev) : NULL; + struct device_domain_info *info; struct intel_iommu *iommu; + unsigned long flags; + u8 bus, devfn; - iommu = device_to_iommu(dev, NULL, NULL); + iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return ERR_PTR(-ENODEV); - if (translation_pre_enabled(iommu)) - dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO); + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + if (dev_is_real_dma_subdevice(dev)) { + info->bus = pdev->bus->number; + info->devfn = pdev->devfn; + info->segment = pci_domain_nr(pdev->bus); + } else { + info->bus = bus; + info->devfn = devfn; + info->segment = iommu->segment; + } + + info->dev = dev; + info->iommu = iommu; + if (dev_is_pci(dev)) { + if (ecap_dev_iotlb_support(iommu->ecap) && + pci_ats_supported(pdev) && + dmar_find_matched_atsr_unit(pdev)) + info->ats_supported = 1; + + if (sm_supported(iommu)) { + if (pasid_supported(iommu)) { + int features = pci_pasid_features(pdev); + + if (features >= 0) + info->pasid_supported = features | 1; + } + + if (info->ats_supported && ecap_prs(iommu->ecap) && + pci_pri_supported(pdev)) + info->pri_supported = 1; + } + } + + spin_lock_irqsave(&device_domain_lock, flags); + list_add(&info->global, &device_domain_list); + dev_iommu_priv_set(dev, info); + spin_unlock_irqrestore(&device_domain_lock, flags); return &iommu->iommu; } static void intel_iommu_release_device(struct device *dev) { - struct intel_iommu *iommu; - - iommu = device_to_iommu(dev, NULL, NULL); - if (!iommu) - return; + struct device_domain_info *info = dev_iommu_priv_get(dev); + unsigned long flags; dmar_remove_one_dev_info(dev); + spin_lock_irqsave(&device_domain_lock, flags); + dev_iommu_priv_set(dev, NULL); + list_del(&info->global); + spin_unlock_irqrestore(&device_domain_lock, flags); + + kfree(info); set_dma_ops(dev, NULL); } @@ -4732,14 +4681,14 @@ static void intel_iommu_get_resv_regions(struct device *device, int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) { - struct device_domain_info *info; + struct device_domain_info *info = dev_iommu_priv_get(dev); struct context_entry *context; struct dmar_domain *domain; unsigned long flags; u64 ctx_lo; int ret; - domain = find_domain(dev); + domain = info->domain; if (!domain) return -EINVAL; @@ -4747,8 +4696,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) spin_lock(&iommu->lock); ret = -EINVAL; - info = get_domain_info(dev); - if (!info || !info->pasid_supported) + if (!info->pasid_supported) goto out; context = iommu_context_addr(iommu, info->bus, info->devfn, 0); @@ -4790,7 +4738,7 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) static int intel_iommu_enable_sva(struct device *dev) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu; int ret; @@ -4819,7 +4767,7 @@ static int intel_iommu_enable_sva(struct device *dev) static int intel_iommu_disable_sva(struct device *dev) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu = info->iommu; int ret; @@ -4832,7 +4780,7 @@ static int intel_iommu_disable_sva(struct device *dev) static int intel_iommu_enable_iopf(struct device *dev) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); if (info && info->pri_supported) return 0; @@ -4872,7 +4820,9 @@ 
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) static bool intel_iommu_is_attach_deferred(struct device *dev) { - return attach_deferred(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); + + return translation_pre_enabled(info->iommu) && !info->domain; } /* diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 10fb82ea467d..f8d215d85695 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -150,7 +150,7 @@ int intel_pasid_alloc_table(struct device *dev) int size; might_sleep(); - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table)) return -EINVAL; @@ -197,7 +197,7 @@ void intel_pasid_free_table(struct device *dev) struct pasid_entry *table; int i, max_pde; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info || !dev_is_pci(dev) || !info->pasid_table) return; @@ -223,7 +223,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev) { struct device_domain_info *info; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info) return NULL; @@ -234,7 +234,7 @@ static int intel_pasid_get_dev_max_id(struct device *dev) { struct device_domain_info *info; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info || !info->pasid_table) return 0; @@ -254,7 +254,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid) return NULL; dir = pasid_table->table; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); dir_index = pasid >> PASID_PDE_SHIFT; index = pasid & PASID_PTE_MASK; @@ -487,7 +487,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu, struct device_domain_info *info; u16 sid, qdep, pfsid; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (!info || !info->ats_enabled) return; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index d04c83dd3a58..944e2408b6d2 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -200,7 +200,7 @@ static void __flush_svm_range_dev(struct intel_svm *svm, unsigned long address, unsigned long pages, int ih) { - struct device_domain_info *info = get_domain_info(sdev->dev); + struct device_domain_info *info = dev_iommu_priv_get(sdev->dev); if (WARN_ON(!pages)) return; @@ -337,7 +337,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu, struct mm_struct *mm, unsigned int flags) { - struct device_domain_info *info = get_domain_info(dev); + struct device_domain_info *info = dev_iommu_priv_get(dev); unsigned long iflags, sflags; struct intel_svm_dev *sdev; struct intel_svm *svm; @@ -545,7 +545,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid) u16 sid, did; int qdep; - info = get_domain_info(dev); + info = dev_iommu_priv_get(dev); if (WARN_ON(!info || !dev_is_pci(dev))) return; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 8c7591b5f3e2..03f1134fc2fe 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -733,8 +733,6 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info, void *data), void *data); void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); -struct dmar_domain *find_domain(struct device *dev); -struct device_domain_info *get_domain_info(struct device *dev); struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); #ifdef 
CONFIG_INTEL_IOMMU_SVM -- cgit v1.2.3-59-g8ed1b From 763e656c693771d2190bc6c6010e71f403e47301 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:53 +0800 Subject: iommu/vt-d: Remove unnecessary includes Remove unnecessary include files and sort the remaining alphabetically. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-7-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index d9965e72d9a8..b6a6fea8525d 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -13,38 +13,18 @@ #define pr_fmt(fmt) "DMAR: " fmt #define dev_fmt(fmt) pr_fmt(fmt) -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include +#include #include #include +#include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include "../irq_remapping.h" #include "../iommu-sva-lib.h" -- cgit v1.2.3-59-g8ed1b From 782861df7dcd67b7fed47f3b6b9bf40851760c6f Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:54 +0800 Subject: iommu/vt-d: Remove unnecessary prototypes Some prototypes in iommu.c are unnecessary. Delete them. Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-8-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b6a6fea8525d..dfd3698406fa 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -296,14 +296,9 @@ static LIST_HEAD(dmar_satc_units); /* bitmap for indexing intel_iommus */ static int g_num_of_iommus; -static void domain_exit(struct dmar_domain *domain); static void domain_remove_dev_info(struct dmar_domain *domain); static void dmar_remove_one_dev_info(struct device *dev); static void __dmar_remove_one_dev_info(struct device_domain_info *info); -static int intel_iommu_attach_device(struct iommu_domain *domain, - struct device *dev); -static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, - dma_addr_t iova); int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON); int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON); -- cgit v1.2.3-59-g8ed1b From 2187a57ef0c572bcca3ab2497cfcfce664ddbf70 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 1 Mar 2022 10:01:55 +0800 Subject: iommu/vt-d: Fix indentation of goto labels Remove blanks before goto labels. 
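For reference, kernel coding style wants goto labels flush against the first column, with no leading whitespace. A hypothetical sketch of the preferred layout (the function and buffer here are made up for illustration):

#include <linux/errno.h>
#include <linux/slab.h>

static int label_style_example(void)
{
	char *buf = kmalloc(4096, GFP_KERNEL);
	int err = 0;

	if (!buf) {
		err = -ENOMEM;
		goto out;
	}

	/* ... work with buf ... */
	kfree(buf);
out:	/* label starts in column one, no leading blank */
	return err;
}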
Signed-off-by: Lu Baolu Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com Link: https://lore.kernel.org/r/20220301020159.633356-9-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index dfd3698406fa..d7fac0a1761d 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -827,7 +827,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) } if (pdev && drhd->include_all) { - got_pdev: +got_pdev: if (bus && devfn) { *bus = pdev->bus->number; *devfn = pdev->devfn; @@ -836,7 +836,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) } } iommu = NULL; - out: +out: if (iommu_is_dummy(iommu, dev)) iommu = NULL; -- cgit v1.2.3-59-g8ed1b From 2852631d96a643e0b21a9b14ad38caf465d7225f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 1 Mar 2022 10:01:56 +0800 Subject: iommu/vt-d: Move intel_iommu_ops to header file Compiler is not happy about hidden declaration of intel_iommu_ops. .../iommu.c:414:24: warning: symbol 'intel_iommu_ops' was not declared. Should it be static? Move declaration to header file to make compiler happy. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220207141240.8253-1-andriy.shevchenko@linux.intel.com Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20220301020159.633356-10-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/dmar.c | 2 -- include/linux/intel-iommu.h | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 6d10be50ec30..4de960834a1b 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -66,8 +66,6 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; static int alloc_iommu(struct dmar_drhd_unit *drhd); static void free_iommu(struct intel_iommu *iommu); -extern const struct iommu_ops intel_iommu_ops; - static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) { /* diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 03f1134fc2fe..4909d6c9ac21 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -783,6 +783,8 @@ bool context_present(struct context_entry *context); struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc); +extern const struct iommu_ops intel_iommu_ops; + #ifdef CONFIG_INTEL_IOMMU extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); -- cgit v1.2.3-59-g8ed1b From 45967ffb9e50aa3472cc6c69a769ef0f09cced5d Mon Sep 17 00:00:00 2001 From: Marco Bonelli Date: Tue, 1 Mar 2022 10:01:57 +0800 Subject: iommu/vt-d: Add missing "__init" for rmrr_sanity_check() rmrr_sanity_check() calls arch_rmrr_sanity_check(), which is marked as "__init". Add the annotation for rmrr_sanity_check() too. This function is currently only called from dmar_parse_one_rmrr() which is also "__init". 
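As background, a generic sketch (not the patched code) of why the annotation must be consistent: __init places a function in .init.text, which the kernel discards once boot completes, and calling an __init function from non-__init code produces a section-mismatch warning at build time. The helper names below are hypothetical:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>

static int __init range_check(unsigned long addr)
{
	/* lives in .init.text; freed along with its only caller after boot */
	return IS_ALIGNED(addr, PAGE_SIZE) ? 0 : -EINVAL;
}

static int __init parse_one_entry(unsigned long addr)
{
	return range_check(addr);	/* __init caller, so no section mismatch */
}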
Signed-off-by: Marco Bonelli Link: https://lore.kernel.org/r/20220210023141.9208-1-marco@mebeim.net Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20220301020159.633356-11-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index d7fac0a1761d..2aa7afd90c3e 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3364,7 +3364,7 @@ static void __init init_iommu_pm_ops(void) static inline void init_iommu_pm_ops(void) {} #endif /* CONFIG_PM */ -static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) +static int __init rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) || !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) || -- cgit v1.2.3-59-g8ed1b From b897a1b7ad3f239fa19ee40aec429e96f5162669 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 1 Mar 2022 10:01:58 +0800 Subject: iommu/vt-d: Remove unused function intel_svm_capable() This is unused since commit 4048377414162 ("iommu/vt-d: Use iommu_sva_alloc(free)_pasid() helpers"). Signed-off-by: YueHaibing Link: https://lore.kernel.org/r/20220216113851.25004-1-yuehaibing@huawei.com Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20220301020159.633356-12-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/svm.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 944e2408b6d2..241d095b6dcf 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -168,11 +168,6 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) return 0; } -static inline bool intel_svm_capable(struct intel_iommu *iommu) -{ - return iommu->flags & VTD_FLAG_SVM_CAPABLE; -} - void intel_svm_check(struct intel_iommu *iommu) { if (!pasid_supported(iommu)) -- cgit v1.2.3-59-g8ed1b From 97f2f2c5317f55ae3440733a090a96a251da222b Mon Sep 17 00:00:00 2001 From: Yian Chen Date: Tue, 1 Mar 2022 10:01:59 +0800 Subject: iommu/vt-d: Enable ATS for the devices in SATC table Starting from Intel VT-d v3.2, the Intel platform BIOS can provide an additional SATC table structure. The SATC table lists the SoC-integrated devices that support an ATC (Address Translation Cache). Enabling the ATC (via the ATS capability) can be a functional requirement for a SATC device to operate, or optional to enhance device performance/functionality; this is indicated by the ATC_REQUIRED bit in the SATC table. When the IOMMU is working in scalable mode, software chooses to always enable ATS for every device in the SATC table, because the Intel SoC devices listed there are trusted to use ATS. On the other hand, if the IOMMU is in legacy mode, ATS on SATC-capable devices works transparently to software and is enabled automatically by the IOMMU hardware; as a result, there is no need for software to enable ATS on these devices. This also removes the dmar_find_matched_atsr_unit() helper, as it has become dead code.
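The resulting policy reduces to a small predicate; here is a sketch mirroring the dmar_ats_supported() logic the patch adds below (the function name is hypothetical):

#include <linux/types.h>

/*
 * Whether the OS should enable ATS for a device listed in the SATC
 * table. In legacy mode the hardware enables ATS by itself for
 * ATC_REQUIRED devices, so the OS enabling it again would only
 * duplicate TLB invalidations.
 */
static bool satc_os_may_enable_ats(bool atc_required, bool scalable_mode)
{
	if (scalable_mode)
		return true;	/* SATC devices are trusted; always enable ATS */

	return !atc_required;	/* legacy mode: leave required devices to HW */
}

This is equivalent to the expression !(satcu->atc_required && !sm_supported(iommu)) used in the diff below.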
Signed-off-by: Yian Chen Link: https://lore.kernel.org/r/20220222185416.1722611-1-yian.chen@intel.com Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20220301020159.633356-13-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.c | 40 ++++++++++++++++++++++++++++++++++++++-- include/linux/intel-iommu.h | 1 - 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 2aa7afd90c3e..8662f5c10721 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3693,7 +3693,31 @@ static void intel_iommu_free_dmars(void) } } -int dmar_find_matched_atsr_unit(struct pci_dev *dev) +static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev) +{ + struct dmar_satc_unit *satcu; + struct acpi_dmar_satc *satc; + struct device *tmp; + int i; + + dev = pci_physfn(dev); + rcu_read_lock(); + + list_for_each_entry_rcu(satcu, &dmar_satc_units, list) { + satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); + if (satc->segment != pci_domain_nr(dev->bus)) + continue; + for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp) + if (to_pci_dev(tmp) == dev) + goto out; + } + satcu = NULL; +out: + rcu_read_unlock(); + return satcu; +} + +static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) { int i, ret = 1; struct pci_bus *bus; @@ -3701,8 +3725,20 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev) struct device *tmp; struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; + struct dmar_satc_unit *satcu; dev = pci_physfn(dev); + satcu = dmar_find_matched_satc_unit(dev); + if (satcu) + /* + * This device supports ATS as it is in SATC table. + * When IOMMU is in legacy mode, enabling ATS is done + * automatically by HW for the device that requires + * ATS, hence OS should not enable this device ATS + * to avoid duplicated TLB invalidation. + */ + return !(satcu->atc_required && !sm_supported(iommu)); + for (bus = dev->bus; bus; bus = bus->parent) { bridge = bus->self; /* If it's an integrated device, allow ATS */ @@ -4550,7 +4586,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) if (dev_is_pci(dev)) { if (ecap_dev_iotlb_support(iommu->ecap) && pci_ats_supported(pdev) && - dmar_find_matched_atsr_unit(pdev)) + dmar_ats_supported(pdev, iommu)) info->ats_supported = 1; if (sm_supported(iommu)) { diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4909d6c9ac21..2f9891cb3d00 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -693,7 +693,6 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte) } extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); -extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); extern int dmar_enable_qi(struct intel_iommu *iommu); extern void dmar_disable_qi(struct intel_iommu *iommu); -- cgit v1.2.3-59-g8ed1b From 97dfad194ca8de04c7292d4f4c8dc493c0d20f85 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 17 Feb 2022 14:24:15 +0000 Subject: iommu/arm-smmu: Account for PMU interrupts In preparation for SMMUv2 PMU support, rejig our IRQ setup code to account for PMU interrupts as additional resources. We can simplify the whole flow by only storing the context IRQs, since the global IRQs are devres-managed and we never refer to them beyond the initial request. 
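With all IRQs flattened into one platform resource list ordered [global | PMU | context], the bookkeeping becomes plain index arithmetic. A condensed sketch of the scheme (the helper name is hypothetical; the real code in the diff below derives the counts from firmware):

#include <linux/platform_device.h>

static int pick_context_irqs(struct platform_device *pdev,
			     u32 global_irqs, u32 pmu_irqs,
			     unsigned int *irqs, int num_context_irqs)
{
	int i;

	for (i = 0; i < num_context_irqs; i++) {
		/* context IRQ i sits after the global and PMU entries */
		int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);

		if (irq < 0)
			return irq;
		irqs[i] = irq;
	}

	return 0;
}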
CC: Lad Prabhakar Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/b2a40caaf1622eb35c555074a0d72f4f0513cff9.1645106346.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu.c | 93 +++++++++++++++++------------------ drivers/iommu/arm/arm-smmu/arm-smmu.h | 5 +- 2 files changed, 46 insertions(+), 52 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 4bc75c4ce402..e50fcf37af58 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -807,7 +807,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, * Request context fault interrupt. Do this last to avoid the * handler seeing a half-initialised domain state. */ - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; + irq = smmu->irqs[cfg->irptndx]; if (smmu->impl && smmu->impl->context_fault) context_fault = smmu->impl->context_fault; @@ -858,7 +858,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) arm_smmu_write_context_bank(smmu, cfg->cbndx); if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) { - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; + irq = smmu->irqs[cfg->irptndx]; devm_free_irq(smmu->dev, irq, domain); } @@ -1951,8 +1951,8 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) return ret; } -static int arm_smmu_device_acpi_probe(struct platform_device *pdev, - struct arm_smmu_device *smmu) +static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu, + u32 *global_irqs, u32 *pmu_irqs) { struct device *dev = smmu->dev; struct acpi_iort_node *node = @@ -1968,7 +1968,8 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, return ret; /* Ignore the configuration access interrupt */ - smmu->num_global_irqs = 1; + *global_irqs = 1; + *pmu_irqs = 0; if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; @@ -1976,25 +1977,24 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, return 0; } #else -static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev, - struct arm_smmu_device *smmu) +static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu, + u32 *global_irqs, u32 *pmu_irqs) { return -ENODEV; } #endif -static int arm_smmu_device_dt_probe(struct platform_device *pdev, - struct arm_smmu_device *smmu) +static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu, + u32 *global_irqs, u32 *pmu_irqs) { const struct arm_smmu_match_data *data; - struct device *dev = &pdev->dev; + struct device *dev = smmu->dev; bool legacy_binding; - if (of_property_read_u32(dev->of_node, "#global-interrupts", - &smmu->num_global_irqs)) { - dev_err(dev, "missing #global-interrupts property\n"); - return -ENODEV; - } + if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs)) + return dev_err_probe(dev, -ENODEV, + "missing #global-interrupts property\n"); + *pmu_irqs = 0; data = of_device_get_match_data(dev); smmu->version = data->version; @@ -2073,6 +2073,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; int num_irqs, i, err; + u32 global_irqs, pmu_irqs; irqreturn_t (*global_fault)(int irq, void *dev); smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); @@ -2083,10 +2084,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev) smmu->dev = dev; if (dev->of_node) - err = arm_smmu_device_dt_probe(pdev, smmu); + err 
= arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs); else - err = arm_smmu_device_acpi_probe(pdev, smmu); - + err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs); if (err) return err; @@ -2105,31 +2105,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (IS_ERR(smmu)) return PTR_ERR(smmu); - num_irqs = 0; - while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { - num_irqs++; - if (num_irqs > smmu->num_global_irqs) - smmu->num_context_irqs++; - } + num_irqs = platform_irq_count(pdev); - if (!smmu->num_context_irqs) { - dev_err(dev, "found %d interrupts but expected at least %d\n", - num_irqs, smmu->num_global_irqs + 1); - return -ENODEV; - } + smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs; + if (smmu->num_context_irqs <= 0) + return dev_err_probe(dev, -ENODEV, + "found %d interrupts but expected at least %d\n", + num_irqs, global_irqs + pmu_irqs + 1); - smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs), - GFP_KERNEL); - if (!smmu->irqs) { - dev_err(dev, "failed to allocate %d irqs\n", num_irqs); - return -ENOMEM; - } + smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs, + sizeof(*smmu->irqs), GFP_KERNEL); + if (!smmu->irqs) + return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n", + smmu->num_context_irqs); - for (i = 0; i < num_irqs; ++i) { - int irq = platform_get_irq(pdev, i); + for (i = 0; i < smmu->num_context_irqs; i++) { + int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i); if (irq < 0) - return -ENODEV; + return irq; smmu->irqs[i] = irq; } @@ -2165,17 +2159,18 @@ static int arm_smmu_device_probe(struct platform_device *pdev) else global_fault = arm_smmu_global_fault; - for (i = 0; i < smmu->num_global_irqs; ++i) { - err = devm_request_irq(smmu->dev, smmu->irqs[i], - global_fault, - IRQF_SHARED, - "arm-smmu global fault", - smmu); - if (err) { - dev_err(dev, "failed to request global IRQ %d (%u)\n", - i, smmu->irqs[i]); - return err; - } + for (i = 0; i < global_irqs; i++) { + int irq = platform_get_irq(pdev, i); + + if (irq < 0) + return irq; + + err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED, + "arm-smmu global fault", smmu); + if (err) + return dev_err_probe(dev, err, + "failed to request global IRQ %d (%u)\n", + i, irq); } err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index 432de2f742c3..2b9b42fb6f30 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -318,11 +318,10 @@ struct arm_smmu_device { unsigned long pa_size; unsigned long pgsize_bitmap; - u32 num_global_irqs; - u32 num_context_irqs; + int num_context_irqs; + int num_clks; unsigned int *irqs; struct clk_bulk_data *clks; - int num_clks; spinlock_t global_sync_lock; -- cgit v1.2.3-59-g8ed1b From 434d2defa93b59b8e845b49bd6882cefd009f516 Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Tue, 1 Mar 2022 14:26:23 +0530 Subject: iommu/amd: Call memunmap in error path Unmap old_devtb in error path. 
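The underlying rule, shown as a simplified hypothetical sketch rather than the driver's exact code: once memremap() has succeeded, every exit path, including the error ones, must reach a matching memunmap():

#include <linux/io.h>
#include <linux/string.h>

static bool table_looks_sane(const void *tbl)
{
	return tbl != NULL;	/* stand-in for the real validation */
}

static bool copy_old_table(phys_addr_t pa, size_t size, void *dst)
{
	void *old = memremap(pa, size, MEMREMAP_WB);

	if (!old)
		return false;

	if (!table_looks_sane(old)) {
		memunmap(old);	/* the unmap the patch adds on failure */
		return false;
	}

	memcpy(dst, old, size);
	memunmap(old);
	return true;
}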
Signed-off-by: Vasant Hegde Link: https://lore.kernel.org/r/20220301085626.87680-3-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/init.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 6b5af568f3d5..f7e7d208063c 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -980,6 +980,7 @@ static bool copy_device_table(void) get_order(dev_table_size)); if (old_dev_tbl_cpy == NULL) { pr_err("Failed to allocate memory for copying old device table!\n"); + memunmap(old_devtb); return false; } @@ -1010,6 +1011,7 @@ static bool copy_device_table(void) if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || (int_tab_len != DTE_INTTABLEN)) { pr_err("Wrong old irq remapping flag: %#x\n", devid); + memunmap(old_devtb); return false; } -- cgit v1.2.3-59-g8ed1b From 3bf01426a574b1fe0ec21aabb7148aff777e623f Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Tue, 1 Mar 2022 14:26:24 +0530 Subject: iommu/amd: Clean up function declarations Remove unused declarations and add static keyword as needed. Signed-off-by: Suravee Suthikulpanit Signed-off-by: Vasant Hegde Link: https://lore.kernel.org/r/20220301085626.87680-4-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu.h | 4 ---- drivers/iommu/amd/init.c | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 416815a525d6..94d33626d692 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -14,10 +14,6 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(u16 devid); -extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); extern int amd_iommu_init_api(void); #ifdef CONFIG_AMD_IOMMU_DEBUGFS diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index f7e7d208063c..07d6dd302989 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -661,7 +661,7 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) * This function resets the command buffer if the IOMMU stopped fetching * commands from it. */ -void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) +static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) { iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); -- cgit v1.2.3-59-g8ed1b From c1d5b57a1ebb97beb34ab8fe6347fb2ba65674b8 Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Tue, 1 Mar 2022 14:26:25 +0530 Subject: iommu/amd: Remove unused struct fault.devid This variable has not been used since it was introduced. 
Signed-off-by: Suravee Suthikulpanit Signed-off-by: Vasant Hegde Link: https://lore.kernel.org/r/20220301085626.87680-5-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu_v2.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 58da08cc3d01..2daf37c21b85 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -71,7 +71,6 @@ struct fault { struct pasid_state *state; struct mm_struct *mm; u64 address; - u16 devid; u32 pasid; u16 tag; u16 finish; -- cgit v1.2.3-59-g8ed1b From 9f968fc70d85558b37cd721a4c08143f69ff9990 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Tue, 1 Mar 2022 14:26:26 +0530 Subject: iommu/amd: Improve amd_iommu_v2_exit() During module exit, the current logic loops through all possible 16-bit device ID space to search for existing devices and clean up device state structures. This can be simplified by looping through the device state list. Also, refactor various clean up logic into free_device_state() for better reusability. Signed-off-by: Vasant Hegde Signed-off-by: Suravee Suthikulpanit Link: https://lore.kernel.org/r/20220301085626.87680-6-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu_v2.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 2daf37c21b85..c72969ac4956 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -24,7 +24,6 @@ MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Joerg Roedel "); -#define MAX_DEVICES 0x10000 #define PRI_QUEUE_SIZE 512 struct pri_queue { @@ -124,6 +123,15 @@ static void free_device_state(struct device_state *dev_state) { struct iommu_group *group; + /* Get rid of any remaining pasid states */ + free_pasid_states(dev_state); + + /* + * Wait until the last reference is dropped before freeing + * the device state. + */ + wait_event(dev_state->wq, !atomic_read(&dev_state->count)); + /* * First detach device from domain - No more PRI requests will arrive * from that device after it is unbound from the IOMMUv2 domain. @@ -849,15 +857,7 @@ void amd_iommu_free_device(struct pci_dev *pdev) spin_unlock_irqrestore(&state_lock, flags); - /* Get rid of any remaining pasid states */ - free_pasid_states(dev_state); - put_device_state(dev_state); - /* - * Wait until the last reference is dropped before freeing - * the device state. - */ - wait_event(dev_state->wq, !atomic_read(&dev_state->count)); free_device_state(dev_state); } EXPORT_SYMBOL(amd_iommu_free_device); @@ -954,8 +954,8 @@ out: static void __exit amd_iommu_v2_exit(void) { - struct device_state *dev_state; - int i; + struct device_state *dev_state, *next; + unsigned long flags; if (!amd_iommu_v2_supported()) return; @@ -968,18 +968,18 @@ static void __exit amd_iommu_v2_exit(void) * The loop below might call flush_workqueue(), so call * destroy_workqueue() after it */ - for (i = 0; i < MAX_DEVICES; ++i) { - dev_state = get_device_state(i); - - if (dev_state == NULL) - continue; + spin_lock_irqsave(&state_lock, flags); + list_for_each_entry_safe(dev_state, next, &state_list, list) { WARN_ON_ONCE(1); put_device_state(dev_state); - amd_iommu_free_device(dev_state->pdev); + list_del(&dev_state->list); + free_device_state(dev_state); } + spin_unlock_irqrestore(&state_lock, flags); + destroy_workqueue(iommu_wq); } -- cgit v1.2.3-59-g8ed1b
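The loop above relies on list_for_each_entry_safe(), which caches the next node so the current entry can be unlinked and freed mid-walk. A generic sketch of the pattern, with a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
};

static void drain_list(struct list_head *head)
{
	struct item *it, *next;

	list_for_each_entry_safe(it, next, head, list) {
		list_del(&it->list);	/* safe: 'next' was cached */
		kfree(it);
	}
}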