author     Thierry Reding <treding@nvidia.com>    2015-03-27 11:07:27 +0100
committer  Joerg Roedel <jroedel@suse.de>         2015-03-31 16:35:35 +0200
commit     804cb54cbb3ddac58218e830d64816617bdb6da8 (patch)
tree       2dcd8489f27b51dcdf99fe43267262ae80aced1a /drivers/iommu
parent     iommu/tegra: gart: Set aperture at domain initialization time (diff)
iommu/tegra: smmu: Compute PFN mask at runtime
The SMMU on Tegra30 and Tegra114 supports addressing up to 4 GiB of physical memory. On Tegra124 the addressable physical memory was extended to 16 GiB. The page frame number stored in PTEs therefore requires 20 or 22 bits, depending on SoC generation. In order to cope with this, compute the proper value at runtime.

Reported-by: Joseph Lo <josephl@nvidia.com>
Cc: Hiroshi Doyu <hdoyu@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
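To make the arithmetic concrete, here is a minimal userspace C sketch of the same computation. It assumes 4 KiB pages (PAGE_SHIFT = 12) and derives the address-bit counts from the limits quoted above (4 GiB -> 32 bits on Tegra30/Tegra114, 16 GiB -> 34 bits on Tegra124); the local BIT_MASK() macro only mirrors the kernel's definition for shift counts below BITS_PER_LONG.

	/* Sketch of the runtime PFN-mask computation; values are illustrative,
	 * derived from the 4 GiB / 16 GiB limits in the commit message rather
	 * than taken from the SoC data verbatim.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	/* Mirrors the kernel's BIT_MASK() for nr < BITS_PER_LONG. */
	#define BIT_MASK(nr)	(1UL << (nr))

	static unsigned long smmu_pfn_mask(unsigned int num_address_bits)
	{
		return BIT_MASK(num_address_bits - PAGE_SHIFT) - 1;
	}

	int main(void)
	{
		/* Tegra30/Tegra114: 32 address bits -> 20-bit PFN -> 0xfffff,
		 * the same value as the old hard-coded SMMU_PFN_MASK. */
		printf("32 address bits: PFN mask %#lx\n", smmu_pfn_mask(32));
		/* Tegra124: 34 address bits -> 22-bit PFN -> 0x3fffff. */
		printf("34 address bits: PFN mask %#lx\n", smmu_pfn_mask(34));
		return 0;
	}

The first result reproduces the previously hard-coded 0x000fffff mask, which is why the constant can safely be replaced by the per-SoC runtime computation below.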
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/tegra-smmu.c  15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index d8418b54619e..76887a73b47a 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
@@ -24,6 +25,8 @@ struct tegra_smmu {
struct tegra_mc *mc;
const struct tegra_smmu_soc *soc;
+ unsigned long pfn_mask;
+
unsigned long *asids;
struct mutex lock;
@@ -105,8 +108,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
-#define SMMU_PFN_MASK 0x000fffff
-
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
@@ -486,7 +487,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu);
} else {
- page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+ page = pfn_to_page(pd[pde] & smmu->pfn_mask);
pt = page_address(page);
}
@@ -508,7 +509,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
u32 *pd = page_address(as->pd), *pt;
struct page *page;
- page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
+ page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
pt = page_address(page);
/*
@@ -583,7 +584,7 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
u32 *pte;
pte = as_get_pte(as, iova, &page);
- pfn = *pte & SMMU_PFN_MASK;
+ pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn);
}
@@ -707,6 +708,10 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
+ smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
+ mc->soc->num_address_bits, smmu->pfn_mask);
+
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
if (soc->supports_request_limit)