From 80eaa9f558134a31c10dddb156d347b9c983290e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 27 May 2019 13:52:48 +0200 Subject: iommu/ipmmu-vmsa: Link IOMMUs and devices in sysfs As of commit 7af9a5fdb9e0ca33 ("iommu/ipmmu-vmsa: Use iommu_device_sysfs_add()/remove()"), IOMMU devices show up under /sys/class/iommu/, but their "devices" subdirectories are empty. Likewise, devices tied to an IOMMU do not have an "iommu" backlink. Make sure all links are created, on both arm32 and arm64. Signed-off-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Reviewed-by: Yoshihiro Shimoda Tested-by: Yoshihiro Shimoda Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9a380c10655e..9f2b781e20a0 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -885,27 +885,37 @@ error: static int ipmmu_add_device(struct device *dev) { + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); struct iommu_group *group; + int ret; /* * Only let through devices that have been verified in xlate() */ - if (!to_ipmmu(dev)) + if (!mmu) return -ENODEV; - if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) - return ipmmu_init_arm_mapping(dev); + if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) { + ret = ipmmu_init_arm_mapping(dev); + if (ret) + return ret; + } else { + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); - group = iommu_group_get_for_dev(dev); - if (IS_ERR(group)) - return PTR_ERR(group); + iommu_group_put(group); + } - iommu_group_put(group); + iommu_device_link(&mmu->iommu, dev); return 0; } static void ipmmu_remove_device(struct device *dev) { + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); + + iommu_device_unlink(&mmu->iommu, dev); arm_iommu_detach_device(dev); iommu_group_remove_device(dev); } -- cgit v1.2.3-59-g8ed1b From 82576aa8af49a1aa31909a8500b972084582a118 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 27 May 2019 13:52:49 +0200 Subject: iommu/ipmmu-vmsa: Prepare to handle 40-bit error addresses On R-Car Gen3, the faulting virtual address is a 40-bit address, and comprised of two registers. Read the upper address part, and combine both parts, when running on a 64-bit system. 
Signed-off-by: Geert Uytterhoeven Reviewed-by: Simon Horman Reviewed-by: Yoshihiro Shimoda Tested-by: Yoshihiro Shimoda Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9f2b781e20a0..f2061bd1dc7b 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -186,7 +186,8 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) #define IMMAIR_ATTR_IDX_WBRWA 1 #define IMMAIR_ATTR_IDX_DEV 2 -#define IMEAR 0x0030 +#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */ +#define IMEUAR 0x0034 /* R-Car Gen3 only */ #define IMPCTR 0x0200 #define IMPSTR 0x0208 @@ -522,14 +523,16 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) { const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF; struct ipmmu_vmsa_device *mmu = domain->mmu; + unsigned long iova; u32 status; - u32 iova; status = ipmmu_ctx_read_root(domain, IMSTR); if (!(status & err_mask)) return IRQ_NONE; - iova = ipmmu_ctx_read_root(domain, IMEAR); + iova = ipmmu_ctx_read_root(domain, IMELAR); + if (IS_ENABLED(CONFIG_64BIT)) + iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32; /* * Clear the error status flags. Unlike traditional interrupt flag @@ -541,10 +544,10 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) /* Log fatal errors. */ if (status & IMSTR_MHIT) - dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n", + dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n", iova); if (status & IMSTR_ABORT) - dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n", + dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n", iova); if (!(status & (IMSTR_PF | IMSTR_TF))) @@ -560,7 +563,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) return IRQ_HANDLED; dev_err_ratelimited(mmu->dev, - "Unhandled fault: status 0x%08x iova 0x%08x\n", + "Unhandled fault: status 0x%08x iova 0x%lx\n", status, iova); return IRQ_HANDLED; -- cgit v1.2.3-59-g8ed1b From b43e0d8a458cf267c98f914a4e0a4cafe2e8fd76 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 27 May 2019 13:52:50 +0200 Subject: iommu/ipmmu-vmsa: Make IPMMU_CTX_MAX unsigned Make the IPMMU_CTX_MAX constant unsigned, to match the type of ipmmu_features.number_of_contexts. This allows to use plain min() instead of type-casting min_t(). Signed-off-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Reviewed-by: Simon Horman Reviewed-by: Yoshihiro Shimoda Tested-by: Yoshihiro Shimoda Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index f2061bd1dc7b..87acf86f295f 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -36,7 +36,7 @@ #define arm_iommu_detach_device(...) 
do {} while (0) #endif -#define IPMMU_CTX_MAX 8 +#define IPMMU_CTX_MAX 8U struct ipmmu_features { bool use_ns_alias_offset; @@ -1060,8 +1060,7 @@ static int ipmmu_probe(struct platform_device *pdev) if (mmu->features->use_ns_alias_offset) mmu->base += IM_NS_ALIAS_OFFSET; - mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX, - mmu->features->number_of_contexts); + mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts); irq = platform_get_irq(pdev, 0); -- cgit v1.2.3-59-g8ed1b From b7f3f047aea47d82aeb251fb38a694e6d890d139 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 27 May 2019 13:52:51 +0200 Subject: iommu/ipmmu-vmsa: Move num_utlbs to SoC-specific features The maximum number of micro-TLBs per IPMMU instance is not fixed, but depends on the SoC type. Hence move it from struct ipmmu_vmsa_device to struct ipmmu_features, and set up the correct value for both R-Car Gen2 and Gen3 SoCs. Note that currently no code uses this value. Signed-off-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Reviewed-by: Simon Horman Reviewed-by: Yoshihiro Shimoda Tested-by: Yoshihiro Shimoda Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 87acf86f295f..3fa57627b1e3 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -42,6 +42,7 @@ struct ipmmu_features { bool use_ns_alias_offset; bool has_cache_leaf_nodes; unsigned int number_of_contexts; + unsigned int num_utlbs; bool setup_imbuscr; bool twobit_imttbcr_sl0; bool reserved_context; @@ -53,7 +54,6 @@ struct ipmmu_vmsa_device { struct iommu_device iommu; struct ipmmu_vmsa_device *root; const struct ipmmu_features *features; - unsigned int num_utlbs; unsigned int num_ctx; spinlock_t lock; /* Protects ctx and domains[] */ DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); @@ -972,6 +972,7 @@ static const struct ipmmu_features ipmmu_features_default = { .use_ns_alias_offset = true, .has_cache_leaf_nodes = false, .number_of_contexts = 1, /* software only tested with one context */ + .num_utlbs = 32, .setup_imbuscr = true, .twobit_imttbcr_sl0 = false, .reserved_context = false, @@ -981,6 +982,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = { .use_ns_alias_offset = false, .has_cache_leaf_nodes = true, .number_of_contexts = 8, + .num_utlbs = 48, .setup_imbuscr = false, .twobit_imttbcr_sl0 = true, .reserved_context = true, @@ -1033,7 +1035,6 @@ static int ipmmu_probe(struct platform_device *pdev) } mmu->dev = &pdev->dev; - mmu->num_utlbs = 48; spin_lock_init(&mmu->lock); bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); mmu->features = of_device_get_match_data(&pdev->dev); -- cgit v1.2.3-59-g8ed1b From 892db541cc68eb39d7813af34f33ce74a0014a1d Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 27 May 2019 13:52:52 +0200 Subject: iommu/ipmmu-vmsa: Extract hardware context initialization ipmmu_domain_init_context() takes care of (1) initializing the software domain, and (2) initializing the hardware context for the domain. Extract the code to initialize the hardware context into a new subroutine ipmmu_domain_setup_context(), to prepare for later reuse. 
Signed-off-by: Geert Uytterhoeven Reviewed-by: Simon Horman Reviewed-by: Yoshihiro Shimoda Tested-by: Yoshihiro Shimoda Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 91 ++++++++++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 43 deletions(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 3fa57627b1e3..56e84bcc9532 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -404,52 +404,10 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, spin_unlock_irqrestore(&mmu->lock, flags); } -static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) +static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain) { u64 ttbr; u32 tmp; - int ret; - - /* - * Allocate the page table operations. - * - * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory - * access, Long-descriptor format" that the NStable bit being set in a - * table descriptor will result in the NStable and NS bits of all child - * entries being ignored and considered as being set. The IPMMU seems - * not to comply with this, as it generates a secure access page fault - * if any of the NStable and NS bits isn't set when running in - * non-secure mode. - */ - domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; - domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; - domain->cfg.ias = 32; - domain->cfg.oas = 40; - domain->cfg.tlb = &ipmmu_gather_ops; - domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); - domain->io_domain.geometry.force_aperture = true; - /* - * TODO: Add support for coherent walk through CCI with DVM and remove - * cache handling. For now, delegate it to the io-pgtable code. - */ - domain->cfg.iommu_dev = domain->mmu->root->dev; - - /* - * Find an unused context. - */ - ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); - if (ret < 0) - return ret; - - domain->context_id = ret; - - domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, - domain); - if (!domain->iop) { - ipmmu_domain_free_context(domain->mmu->root, - domain->context_id); - return -EINVAL; - } /* TTBR0 */ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; @@ -495,7 +453,54 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) */ ipmmu_ctx_write_all(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); +} + +static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) +{ + int ret; + + /* + * Allocate the page table operations. + * + * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory + * access, Long-descriptor format" that the NStable bit being set in a + * table descriptor will result in the NStable and NS bits of all child + * entries being ignored and considered as being set. The IPMMU seems + * not to comply with this, as it generates a secure access page fault + * if any of the NStable and NS bits isn't set when running in + * non-secure mode. + */ + domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; + domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; + domain->cfg.ias = 32; + domain->cfg.oas = 40; + domain->cfg.tlb = &ipmmu_gather_ops; + domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); + domain->io_domain.geometry.force_aperture = true; + /* + * TODO: Add support for coherent walk through CCI with DVM and remove + * cache handling. For now, delegate it to the io-pgtable code. + */ + domain->cfg.iommu_dev = domain->mmu->root->dev; + + /* + * Find an unused context. 
+ */ + ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); + if (ret < 0) + return ret; + + domain->context_id = ret; + + domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, + domain); + if (!domain->iop) { + ipmmu_domain_free_context(domain->mmu->root, + domain->context_id); + return -EINVAL; + } + ipmmu_domain_setup_context(domain); return 0; } -- cgit v1.2.3-59-g8ed1b From da38e9ec9c2d0ebec1499b5961baffd08f8ca062 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 27 May 2019 13:52:53 +0200 Subject: iommu/ipmmu-vmsa: Add suspend/resume support During PSCI system suspend, R-Car Gen3 SoCs are powered down, and all IPMMU state is lost. Hence after s2ram, devices wired behind an IPMMU, and configured to use it, will see their DMA operations hang. To fix this, restore all IPMMU contexts, and re-enable all active micro-TLBs during system resume. Signed-off-by: Geert Uytterhoeven Reviewed-by: Yoshihiro Shimoda Tested-by: Yoshihiro Shimoda Signed-off-by: Joerg Roedel --- drivers/iommu/ipmmu-vmsa.c | 47 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) (limited to 'drivers/iommu') diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 56e84bcc9532..408ad0b25919 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -36,7 +36,10 @@ #define arm_iommu_detach_device(...) do {} while (0) #endif -#define IPMMU_CTX_MAX 8U +#define IPMMU_CTX_MAX 8U +#define IPMMU_CTX_INVALID -1 + +#define IPMMU_UTLB_MAX 48U struct ipmmu_features { bool use_ns_alias_offset; @@ -58,6 +61,7 @@ struct ipmmu_vmsa_device { spinlock_t lock; /* Protects ctx and domains[] */ DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; + s8 utlb_ctx[IPMMU_UTLB_MAX]; struct iommu_group *group; struct dma_iommu_mapping *mapping; @@ -335,6 +339,7 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, ipmmu_write(mmu, IMUCTR(utlb), IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | IMUCTR_MMUEN); + mmu->utlb_ctx[utlb] = domain->context_id; } /* @@ -346,6 +351,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, struct ipmmu_vmsa_device *mmu = domain->mmu; ipmmu_write(mmu, IMUCTR(utlb), 0); + mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; } static void ipmmu_tlb_flush_all(void *cookie) @@ -1043,6 +1049,7 @@ static int ipmmu_probe(struct platform_device *pdev) spin_lock_init(&mmu->lock); bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); mmu->features = of_device_get_match_data(&pdev->dev); + memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); /* Map I/O memory and request IRQ. 
*/ @@ -1158,10 +1165,48 @@ static int ipmmu_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int ipmmu_resume_noirq(struct device *dev) +{ + struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); + unsigned int i; + + /* Reset root MMU and restore contexts */ + if (ipmmu_is_root(mmu)) { + ipmmu_device_reset(mmu); + + for (i = 0; i < mmu->num_ctx; i++) { + if (!mmu->domains[i]) + continue; + + ipmmu_domain_setup_context(mmu->domains[i]); + } + } + + /* Re-enable active micro-TLBs */ + for (i = 0; i < mmu->features->num_utlbs; i++) { + if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID) + continue; + + ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i); + } + + return 0; +} + +static const struct dev_pm_ops ipmmu_pm = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq) +}; +#define DEV_PM_OPS &ipmmu_pm +#else +#define DEV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + static struct platform_driver ipmmu_driver = { .driver = { .name = "ipmmu-vmsa", .of_match_table = of_match_ptr(ipmmu_of_ids), + .pm = DEV_PM_OPS, }, .probe = ipmmu_probe, .remove = ipmmu_remove, -- cgit v1.2.3-59-g8ed1b
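
A note on the suspend/resume wiring introduced by the last patch: drivers whose hardware loses state across system sleep typically restore it in the "noirq" resume phase through dev_pm_ops, so the hardware is reprogrammed before client devices run their own resume callbacks. The sketch below is a minimal, self-contained illustration of that pattern only; the foo_* names are hypothetical and are not part of the ipmmu-vmsa driver, whose actual implementation is ipmmu_resume_noirq() in the diff above.

    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    struct foo_device {
            void __iomem *base;
            u32 saved_ctrl;         /* software copy of a control register */
    };

    #ifdef CONFIG_PM_SLEEP
    static int foo_resume_noirq(struct device *dev)
    {
            struct foo_device *foo = dev_get_drvdata(dev);

            /*
             * The hardware was powered down during suspend; reprogram it
             * from the software copy before dependent devices resume.
             */
            writel(foo->saved_ctrl, foo->base);
            return 0;
    }

    static const struct dev_pm_ops foo_pm = {
            /* No suspend callback needed: state is already mirrored in memory. */
            SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, foo_resume_noirq)
    };
    #define FOO_PM_OPS (&foo_pm)
    #else
    #define FOO_PM_OPS NULL
    #endif /* CONFIG_PM_SLEEP */

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = FOO_PM_OPS,
            },
    };

As in the patch, guarding the definitions with CONFIG_PM_SLEEP and falling back to a NULL .pm pointer keeps the driver free of dead code on kernels built without system sleep support.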