Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu_v2.c            2
-rw-r--r--  drivers/iommu/arm-smmu-v3.c           127
-rw-r--r--  drivers/iommu/arm-smmu.c              376
-rw-r--r--  drivers/iommu/dma-iommu.c             290
-rw-r--r--  drivers/iommu/dmar.c                   35
-rw-r--r--  drivers/iommu/exynos-iommu.c           32
-rw-r--r--  drivers/iommu/fsl_pamu.h                1
-rw-r--r--  drivers/iommu/intel-iommu.c            41
-rw-r--r--  drivers/iommu/intel_irq_remapping.c    15
-rw-r--r--  drivers/iommu/io-pgtable-arm.c          2
-rw-r--r--  drivers/iommu/iommu.c                  78
-rw-r--r--  drivers/iommu/iova.c                   89
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c           26
-rw-r--r--  drivers/iommu/of_iommu.c              126
-rw-r--r--  drivers/iommu/omap-iommu.c            190
-rw-r--r--  drivers/iommu/omap-iommu.h             34
-rw-r--r--  drivers/iommu/rockchip-iommu.c         31
-rw-r--r--  drivers/iommu/tegra-smmu.c              1
18 files changed, 959 insertions, 537 deletions
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 063343909b0d..6629c472eafd 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -696,9 +696,9 @@ out_clear_state:
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, mm);
+ mmput(mm);
out_free:
- mmput(mm);
free_pasid_state(pasid_state);
out:
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 591bb96047c9..380969aa60d5 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -554,9 +554,14 @@ struct arm_smmu_s2_cfg {
};
struct arm_smmu_strtab_ent {
- bool valid;
-
- bool bypass; /* Overrides s1/s2 config */
+ /*
+ * An STE is "assigned" if the master emitting the corresponding SID
+ * is attached to a domain. The behaviour of an unassigned STE is
+ * determined by the disable_bypass parameter, whereas an assigned
+ * STE behaves according to s1_cfg/s2_cfg, which themselves are
+ * configured according to the domain type.
+ */
+ bool assigned;
struct arm_smmu_s1_cfg *s1_cfg;
struct arm_smmu_s2_cfg *s2_cfg;
};
@@ -632,6 +637,7 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
@@ -1005,9 +1011,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
* This is hideously complicated, but we only really care about
* three cases at the moment:
*
- * 1. Invalid (all zero) -> bypass (init)
- * 2. Bypass -> translation (attach)
- * 3. Translation -> bypass (detach)
+ * 1. Invalid (all zero) -> bypass/fault (init)
+ * 2. Bypass/fault -> translation/bypass (attach)
+ * 3. Translation/bypass -> bypass/fault (detach)
*
* Given that we can't update the STE atomically and the SMMU
* doesn't read the thing in a defined order, that leaves us
@@ -1046,11 +1052,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
/* Nuke the existing STE_0 value, as we're going to rewrite it */
- val = ste->valid ? STRTAB_STE_0_V : 0;
+ val = STRTAB_STE_0_V;
+
+ /* Bypass/fault */
+ if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
+ if (!ste->assigned && disable_bypass)
+ val |= STRTAB_STE_0_CFG_ABORT;
+ else
+ val |= STRTAB_STE_0_CFG_BYPASS;
- if (ste->bypass) {
- val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
- : STRTAB_STE_0_CFG_BYPASS;
dst[0] = cpu_to_le64(val);
dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
<< STRTAB_STE_1_SHCFG_SHIFT);
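The comment blocks above describe the new STE life-cycle in prose; condensed into one hypothetical helper (not part of the patch, reusing the field names introduced above plus the driver's existing S1/S2 translate CFG constants from elsewhere in this file), the decision the rewritten arm_smmu_write_strtab_ent() makes is roughly:

/* Hypothetical summary only -- the real logic is in the hunk above. */
static u64 ste_cfg_for(struct arm_smmu_strtab_ent *ste, bool disable_bypass)
{
        if (!ste->assigned)                     /* master not attached */
                return disable_bypass ? STRTAB_STE_0_CFG_ABORT
                                      : STRTAB_STE_0_CFG_BYPASS;
        if (!ste->s1_cfg && !ste->s2_cfg)       /* identity domain */
                return STRTAB_STE_0_CFG_BYPASS;
        return ste->s1_cfg ? STRTAB_STE_0_CFG_S1_TRANS
                           : STRTAB_STE_0_CFG_S2_TRANS;
}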
@@ -1111,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
unsigned int i;
- struct arm_smmu_strtab_ent ste = {
- .valid = true,
- .bypass = true,
- };
+ struct arm_smmu_strtab_ent ste = { .assigned = false };
for (i = 0; i < nent; ++i) {
arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
@@ -1378,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
/*
@@ -1509,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ return 0;
+ }
+
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
@@ -1579,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
return step;
}
-static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
int i;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
@@ -1591,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
-
- return 0;
}
static void arm_smmu_detach_dev(struct device *dev)
{
struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
- master->ste.bypass = true;
- if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
- dev_warn(dev, "failed to install bypass STE\n");
+ master->ste.assigned = false;
+ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1620,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
ste = &master->ste;
/* Already attached to a different domain? */
- if (!ste->bypass)
+ if (ste->assigned)
arm_smmu_detach_dev(dev);
mutex_lock(&smmu_domain->init_mutex);
@@ -1641,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
}
- ste->bypass = false;
- ste->valid = true;
+ ste->assigned = true;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
+ ste->s1_cfg = NULL;
+ ste->s2_cfg = NULL;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
ste->s1_cfg = &smmu_domain->s1_cfg;
ste->s2_cfg = NULL;
arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
@@ -1653,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
ste->s2_cfg = &smmu_domain->s2_cfg;
}
- ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
- if (ret < 0)
- ste->valid = false;
-
+ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1704,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
if (!ops)
return 0;
@@ -1807,7 +1820,7 @@ static void arm_smmu_remove_device(struct device *dev)
master = fwspec->iommu_priv;
smmu = master->smmu;
- if (master && master->ste.valid)
+ if (master && master->ste.assigned)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
@@ -1837,6 +1850,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1852,6 +1868,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
@@ -1893,6 +1912,8 @@ static void arm_smmu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
}
static void arm_smmu_put_resv_regions(struct device *dev,
@@ -2761,51 +2782,9 @@ static struct platform_driver arm_smmu_driver = {
.probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
+module_platform_driver(arm_smmu_driver);
-static int __init arm_smmu_init(void)
-{
- static bool registered;
- int ret = 0;
-
- if (!registered) {
- ret = platform_driver_register(&arm_smmu_driver);
- registered = !ret;
- }
- return ret;
-}
-
-static void __exit arm_smmu_exit(void)
-{
- return platform_driver_unregister(&arm_smmu_driver);
-}
-
-subsys_initcall(arm_smmu_init);
-module_exit(arm_smmu_exit);
-
-static int __init arm_smmu_of_init(struct device_node *np)
-{
- int ret = arm_smmu_init();
-
- if (ret)
- return ret;
-
- if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
- return -ENODEV;
-
- return 0;
-}
-IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
-
-#ifdef CONFIG_ACPI
-static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
-{
- if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
- return arm_smmu_init();
-
- return 0;
-}
-IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
-#endif
+IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b493c99e17f7..7ec30b08b3bd 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -162,6 +162,7 @@
#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
#define sTLBGSTATUS_GSACTIVE (1 << 0)
#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
+#define TLB_SPIN_COUNT 10
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
@@ -216,8 +217,7 @@ enum arm_smmu_s2cr_privcfg {
#define CBA2R_VMID_MASK 0xffff
/* Translation context bank */
-#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
-#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
+#define ARM_SMMU_CB(smmu, n) ((smmu)->cb_base + ((n) << (smmu)->pgshift))
#define ARM_SMMU_CB_SCTLR 0x0
#define ARM_SMMU_CB_ACTLR 0x4
@@ -238,6 +238,8 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_SMMU_CB_S1_TLBIVAL 0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
+#define ARM_SMMU_CB_TLBSYNC 0x7f0
+#define ARM_SMMU_CB_TLBSTATUS 0x7f4
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
@@ -344,7 +346,7 @@ struct arm_smmu_device {
struct device *dev;
void __iomem *base;
- unsigned long size;
+ void __iomem *cb_base;
unsigned long pgshift;
#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
@@ -404,18 +406,20 @@ enum arm_smmu_context_fmt {
struct arm_smmu_cfg {
u8 cbndx;
u8 irptndx;
+ union {
+ u16 asid;
+ u16 vmid;
+ };
u32 cbar;
enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX 0xff
-#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
-#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
-
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
@@ -569,49 +573,67 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
}
/* Wait for any pending TLB invalidations to complete */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
+ void __iomem *sync, void __iomem *status)
{
- int count = 0;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
- while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
- & sTLBGSTATUS_GSACTIVE) {
- cpu_relax();
- if (++count == TLB_LOOP_TIMEOUT) {
- dev_err_ratelimited(smmu->dev,
- "TLB sync timed out -- SMMU may be deadlocked\n");
- return;
+ unsigned int spin_cnt, delay;
+
+ writel_relaxed(0, sync);
+ for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
+ for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
+ if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
+ return;
+ cpu_relax();
}
- udelay(1);
+ udelay(delay);
}
+ dev_err_ratelimited(smmu->dev,
+ "TLB sync timed out -- SMMU may be deadlocked\n");
}
-static void arm_smmu_tlb_sync(void *cookie)
+static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
+{
+ void __iomem *base = ARM_SMMU_GR0(smmu);
+
+ __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
+ base + ARM_SMMU_GR0_sTLBGSTATUS);
+}
+
+static void arm_smmu_tlb_sync_context(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- __arm_smmu_tlb_sync(smmu_domain->smmu);
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+ __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
+ base + ARM_SMMU_CB_TLBSTATUS);
}
-static void arm_smmu_tlb_inv_context(void *cookie)
+static void arm_smmu_tlb_sync_vmid(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+
+ arm_smmu_tlb_sync_global(smmu_domain->smmu);
+}
+
+static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
- void __iomem *base;
+ void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
- if (stage1) {
- base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
- writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
- base + ARM_SMMU_CB_S1_TLBIASID);
- } else {
- base = ARM_SMMU_GR0(smmu);
- writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
- base + ARM_SMMU_GR0_TLBIVMID);
- }
+ writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_inv_context_s2(void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ void __iomem *base = ARM_SMMU_GR0(smmu);
- __arm_smmu_tlb_sync(smmu);
+ writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ arm_smmu_tlb_sync_global(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
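For a rough sense of the timing of the reworked sync loop above: each outer iteration busy-polls the status register TLB_SPIN_COUNT (10) times and then sleeps for a doubling delay of 1, 2, 4, ... microseconds, so the loop gives up after about 20 iterations (2^20 > TLB_LOOP_TIMEOUT = 10^6) having slept roughly 2^20 - 1 us, i.e. around the same one-second budget as the old fixed loop, while a sync that completes quickly is now observed within the first handful of register reads without any udelay() at all.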
@@ -619,31 +641,28 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
- void __iomem *reg;
+ void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
if (stage1) {
- reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
iova &= ~12UL;
- iova |= ARM_SMMU_CB_ASID(smmu, cfg);
+ iova |= cfg->asid;
do {
writel_relaxed(iova, reg);
iova += granule;
} while (size -= granule);
} else {
iova >>= 12;
- iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
+ iova |= (u64)cfg->asid << 48;
do {
writeq_relaxed(iova, reg);
iova += granule >> 12;
} while (size -= granule);
}
- } else if (smmu->version == ARM_SMMU_V2) {
- reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ } else {
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
@@ -651,16 +670,40 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
smmu_write_atomic_lq(iova, reg);
iova += granule >> 12;
} while (size -= granule);
- } else {
- reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
- writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
}
}
-static const struct iommu_gather_ops arm_smmu_gather_ops = {
- .tlb_flush_all = arm_smmu_tlb_inv_context,
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
+ */
+static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ struct arm_smmu_domain *smmu_domain = cookie;
+ void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+
+ writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+}
+
+static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
- .tlb_sync = arm_smmu_tlb_sync,
+ .tlb_sync = arm_smmu_tlb_sync_context,
+};
+
+static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_context,
+};
+
+static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync,
+ .tlb_sync = arm_smmu_tlb_sync_vmid,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
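To make the MMU-401 comment above concrete, this is approximately the contract the io-pgtable code expects from an iommu_gather_ops implementation (a simplified sketch, not the actual io-pgtable code): many per-range invalidations may be issued before a single sync, so even a .tlb_add_flush that merely pokes TLBIVMID gets the first invalidation in flight long before arm_smmu_tlb_sync_vmid() finally waits for completion.

/* Hypothetical caller, illustrating the callback sequence only. */
static void example_unmap_and_flush(const struct iommu_gather_ops *tlb,
                                    unsigned long iova, size_t size,
                                    size_t granule, void *cookie)
{
        size_t off;

        /* One invalidation request per unmapped granule... */
        for (off = 0; off < size; off += granule)
                tlb->tlb_add_flush(iova + off, granule, granule, true, cookie);

        /* ...but only one wait for all of them to complete. */
        tlb->tlb_sync(cookie);
}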
@@ -673,7 +716,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *cb_base;
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (!(fsr & FSR_FAULT))
@@ -726,7 +769,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
gr1_base = ARM_SMMU_GR1(smmu);
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
if (smmu->version > ARM_SMMU_V1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
@@ -735,7 +778,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
reg = CBA2R_RW64_32BIT;
/* 16-bit VMIDs live in CBA2R */
if (smmu->features & ARM_SMMU_FEAT_VMID16)
- reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
+ reg |= cfg->vmid << CBA2R_VMID_SHIFT;
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
@@ -754,34 +797,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
/* 8-bit VMIDs live in CBAR */
- reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
+ reg |= cfg->vmid << CBAR_VMID_SHIFT;
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
- /* TTBRs */
- if (stage1) {
- u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
-
- if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
- reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
- reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
- writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
- } else {
- reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
- reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
- }
- } else {
- reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
- }
-
- /* TTBCR */
+ /*
+ * TTBCR
+ * We must write this before the TTBRs, since it determines the
+ * access behaviour of some fields (in particular, ASID[15:8]).
+ */
if (stage1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
reg = pgtbl_cfg->arm_v7s_cfg.tcr;
@@ -800,6 +824,27 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+ /* TTBRs */
+ if (stage1) {
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
+ reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
+ reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
+ writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
+ } else {
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+ }
+ } else {
+ reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+ }
+
/* MAIRs (stage-1 only) */
if (stage1) {
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
@@ -833,11 +878,18 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+ const struct iommu_gather_ops *tlb_ops;
mutex_lock(&smmu_domain->init_mutex);
if (smmu_domain->smmu)
goto out_unlock;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -904,6 +956,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
+ tlb_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -922,12 +975,15 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 40UL);
oas = min(oas, 40UL);
}
+ if (smmu->version == ARM_SMMU_V2)
+ tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+ else
+ tlb_ops = &arm_smmu_s2_tlb_ops_v1;
break;
default:
ret = -EINVAL;
goto out_unlock;
}
-
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
if (ret < 0)
@@ -941,11 +997,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
cfg->irptndx = cfg->cbndx;
}
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
+ cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
+ else
+ cfg->asid = cfg->cbndx + smmu->cavium_id_base;
+
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
- .tlb = &arm_smmu_gather_ops,
+ .tlb = tlb_ops,
.iommu_dev = smmu->dev,
};
@@ -998,14 +1059,14 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;
- if (!smmu)
+ if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
return;
/*
* Disable the context bank and free the page tables before freeing
* it.
*/
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
if (cfg->irptndx != INVALID_IRPTNDX) {
@@ -1021,7 +1082,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
/*
* Allocate the domain and initialise some of its data structures.
@@ -1250,10 +1313,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2cr *s2cr = smmu->s2crs;
- enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
u8 cbndx = smmu_domain->cfg.cbndx;
+ enum arm_smmu_s2cr_type type;
int i, idx;
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
+ type = S2CR_TYPE_BYPASS;
+ else
+ type = S2CR_TYPE_TRANS;
+
for_each_cfg_sme(fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1356,7 +1424,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
u64 phys;
unsigned long va;
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+ cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
/* ATS1 registers can only be written atomically */
va = iova & ~0xfffUL;
@@ -1391,6 +1459,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
if (!ops)
return 0;
@@ -1467,7 +1538,7 @@ static int arm_smmu_add_device(struct device *dev)
}
if (mask & ~smmu->smr_mask_mask) {
dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
- sid, smmu->smr_mask_mask);
+ mask, smmu->smr_mask_mask);
goto out_free;
}
}
@@ -1549,6 +1620,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1564,6 +1638,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
mutex_lock(&smmu_domain->init_mutex);
switch (attr) {
@@ -1590,13 +1667,15 @@ out_unlock:
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- u32 fwid = 0;
+ u32 mask, fwid = 0;
if (args->args_count > 0)
fwid |= (u16)args->args[0];
if (args->args_count > 1)
fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+ else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
+ fwid |= (u16)mask << SMR_MASK_SHIFT;
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
@@ -1613,6 +1692,8 @@ static void arm_smmu_get_resv_regions(struct device *dev,
return;
list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
}
static void arm_smmu_put_resv_regions(struct device *dev,
@@ -1683,7 +1764,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) {
- cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+ cb_base = ARM_SMMU_CB(smmu, i);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
/*
@@ -1729,7 +1810,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg |= sCR0_EXIDENABLE;
/* Push the button */
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_tlb_sync_global(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
@@ -1863,11 +1944,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
/* Check for size mismatch of SMMU address space from mapped region */
size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
- size *= 2 << smmu->pgshift;
- if (smmu->size != size)
+ size <<= smmu->pgshift;
+ if (smmu->cb_base != gr0_base + size)
dev_warn(smmu->dev,
- "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
- size, smmu->size);
+ "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
+ size * 2, (smmu->cb_base - gr0_base) * 2);
smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
@@ -1887,6 +1968,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
atomic_add_return(smmu->num_context_banks,
&cavium_smmu_context_count);
smmu->cavium_id_base -= smmu->num_context_banks;
+ dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
}
/* ID2 */
@@ -2075,6 +2157,23 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
return 0;
}
+static void arm_smmu_bus_init(void)
+{
+ /* Oh, for a proper bus abstraction */
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
+ if (!iommu_present(&amba_bustype))
+ bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+#ifdef CONFIG_PCI
+ if (!iommu_present(&pci_bus_type)) {
+ pci_request_acs();
+ bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
+ }
+#endif
+}
+
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -2103,7 +2202,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
- smmu->size = resource_size(res);
+ smmu->cb_base = smmu->base + resource_size(res) / 2;
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
@@ -2180,21 +2279,30 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
arm_smmu_device_reset(smmu);
arm_smmu_test_smr_masks(smmu);
- /* Oh, for a proper bus abstraction */
- if (!iommu_present(&platform_bus_type))
- bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-#ifdef CONFIG_ARM_AMBA
- if (!iommu_present(&amba_bustype))
- bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
-#ifdef CONFIG_PCI
- if (!iommu_present(&pci_bus_type)) {
- pci_request_acs();
- bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
- }
-#endif
+ /*
+ * For ACPI and generic DT bindings, an SMMU will be probed before
+ * any device which might need it, so we want the bus ops in place
+ * ready to handle default domain setup as soon as any SMMU exists.
+ */
+ if (!using_legacy_binding)
+ arm_smmu_bus_init();
+
+ return 0;
+}
+
+/*
+ * With the legacy DT binding in play, though, we have no guarantees about
+ * probe order, but then we're also not doing default domains, so we can
+ * delay setting bus ops until we're sure every possible SMMU is ready,
+ * and that way ensure that no add_device() calls get missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+ if (using_legacy_binding)
+ arm_smmu_bus_init();
return 0;
}
+device_initcall_sync(arm_smmu_legacy_bus_init);
static int arm_smmu_device_remove(struct platform_device *pdev)
{
@@ -2219,56 +2327,14 @@ static struct platform_driver arm_smmu_driver = {
.probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
-
-static int __init arm_smmu_init(void)
-{
- static bool registered;
- int ret = 0;
-
- if (!registered) {
- ret = platform_driver_register(&arm_smmu_driver);
- registered = !ret;
- }
- return ret;
-}
-
-static void __exit arm_smmu_exit(void)
-{
- return platform_driver_unregister(&arm_smmu_driver);
-}
-
-subsys_initcall(arm_smmu_init);
-module_exit(arm_smmu_exit);
-
-static int __init arm_smmu_of_init(struct device_node *np)
-{
- int ret = arm_smmu_init();
-
- if (ret)
- return ret;
-
- if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
- return -ENODEV;
-
- return 0;
-}
-IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
-IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
-
-#ifdef CONFIG_ACPI
-static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
-{
- if (iort_node_match(ACPI_IORT_NODE_SMMU))
- return arm_smmu_init();
-
- return 0;
-}
-IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
-#endif
+module_platform_driver(arm_smmu_driver);
+
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 48d36ce59efb..62618e77bedc 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -61,15 +61,6 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
return PAGE_SIZE;
}
-static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
-{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
-
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
- return &cookie->iovad;
- return NULL;
-}
-
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
struct iommu_dma_cookie *cookie;
@@ -167,22 +158,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
-static void iova_reserve_pci_windows(struct pci_dev *dev,
- struct iova_domain *iovad)
+/**
+ * iommu_dma_get_resv_regions - Reserved region driver helper
+ * @dev: Device from iommu_get_resv_regions()
+ * @list: Reserved region list from iommu_get_resv_regions()
+ *
+ * IOMMU drivers can use this to implement their .get_resv_regions callback
+ * for general non-IOMMU-specific reservations. Currently, this covers host
+ * bridge windows for PCI devices.
+ */
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
- struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ struct pci_host_bridge *bridge;
struct resource_entry *window;
- unsigned long lo, hi;
+ if (!dev_is_pci(dev))
+ return;
+
+ bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
resource_list_for_each_entry(window, &bridge->windows) {
- if (resource_type(window->res) != IORESOURCE_MEM &&
- resource_type(window->res) != IORESOURCE_IO)
+ struct iommu_resv_region *region;
+ phys_addr_t start;
+ size_t length;
+
+ if (resource_type(window->res) != IORESOURCE_MEM)
+ continue;
+
+ start = window->res->start - window->offset;
+ length = window->res->end - window->res->start + 1;
+ region = iommu_alloc_resv_region(start, length, 0,
+ IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, list);
+ }
+}
+EXPORT_SYMBOL(iommu_dma_get_resv_regions);
+
+static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
+ phys_addr_t start, phys_addr_t end)
+{
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_dma_msi_page *msi_page;
+ int i, num_pages;
+
+ start -= iova_offset(iovad, start);
+ num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
+
+ msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
+ if (!msi_page)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pages; i++) {
+ msi_page[i].phys = start;
+ msi_page[i].iova = start;
+ INIT_LIST_HEAD(&msi_page[i].list);
+ list_add(&msi_page[i].list, &cookie->msi_page_list);
+ start += iovad->granule;
+ }
+
+ return 0;
+}
+
+static int iova_reserve_iommu_regions(struct device *dev,
+ struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_resv_region *region;
+ LIST_HEAD(resv_regions);
+ int ret = 0;
+
+ iommu_get_resv_regions(dev, &resv_regions);
+ list_for_each_entry(region, &resv_regions, list) {
+ unsigned long lo, hi;
+
+ /* We ARE the software that manages these! */
+ if (region->type == IOMMU_RESV_SW_MSI)
continue;
- lo = iova_pfn(iovad, window->res->start - window->offset);
- hi = iova_pfn(iovad, window->res->end - window->offset);
+ lo = iova_pfn(iovad, region->start);
+ hi = iova_pfn(iovad, region->start + region->length - 1);
reserve_iova(iovad, lo, hi);
+
+ if (region->type == IOMMU_RESV_MSI)
+ ret = cookie_init_hw_msi_region(cookie, region->start,
+ region->start + region->length);
+ if (ret)
+ break;
}
+ iommu_put_resv_regions(dev, &resv_regions);
+
+ return ret;
}
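As the kernel-doc above suggests, an IOMMU driver's .get_resv_regions callback can simply chain into this helper after adding its own hardware-specific regions; the two Arm SMMU drivers earlier in this series do exactly that. A sketch of such a callback (MSI_IOVA_BASE and MSI_IOVA_LENGTH stand in for driver-local constants, not defined here):

static void example_get_resv_regions(struct device *dev,
                                     struct list_head *head)
{
        struct iommu_resv_region *region;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

        /* Driver-specific reservation, e.g. a software-managed MSI window. */
        region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
                                         prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;
        list_add_tail(&region->list, head);

        /* Generic, non-IOMMU-specific reservations (PCI windows etc.). */
        iommu_dma_get_resv_regions(dev, head);
}

Note that iova_reserve_iommu_regions() above deliberately skips IOMMU_RESV_SW_MSI regions, since iommu-dma allocates the software MSI pages itself, whereas hardware IOMMU_RESV_MSI regions are both reserved and pre-populated via cookie_init_hw_msi_region().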
/**
@@ -203,7 +271,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
- bool pci = dev && dev_is_pci(dev);
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -233,7 +300,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
* leave the cache limit at the top of their range to save an rb_last()
* traversal on every allocation.
*/
- if (pci)
+ if (dev && dev_is_pci(dev))
end_pfn &= DMA_BIT_MASK(32) >> order;
/* start_pfn is always nonzero for an already-initialised domain */
@@ -248,12 +315,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
* area cache limit down for the benefit of the smaller one.
*/
iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
- } else {
- init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
- if (pci)
- iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+
+ return 0;
}
- return 0;
+
+ init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+ if (!dev)
+ return 0;
+
+ return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
@@ -286,48 +356,67 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
}
-static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
- dma_addr_t dma_limit, struct device *dev)
+static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+ size_t size, dma_addr_t dma_limit, struct device *dev)
{
- struct iova_domain *iovad = cookie_iovad(domain);
- unsigned long shift = iova_shift(iovad);
- unsigned long length = iova_align(iovad, size) >> shift;
- struct iova *iova = NULL;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ unsigned long shift, iova_len, iova = 0;
+
+ if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
+ cookie->msi_iova += size;
+ return cookie->msi_iova - size;
+ }
+
+ shift = iova_shift(iovad);
+ iova_len = size >> shift;
+ /*
+ * Freeing non-power-of-two-sized allocations back into the IOVA caches
+ * will come back to bite us badly, so we have to waste a bit of space
+ * rounding up anything cacheable to make sure that can't happen. The
+ * order of the unadjusted size will still match upon freeing.
+ */
+ if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ iova_len = roundup_pow_of_two(iova_len);
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
- iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
- true);
- /*
- * Enforce size-alignment to be safe - there could perhaps be an
- * attribute to control this per-device, or at least per-domain...
- */
+ iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
+
if (!iova)
- iova = alloc_iova(iovad, length, dma_limit >> shift, true);
+ iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
- return iova;
+ return (dma_addr_t)iova << shift;
}
-/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
+static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+ dma_addr_t iova, size_t size)
{
- struct iova_domain *iovad = cookie_iovad(domain);
- unsigned long shift = iova_shift(iovad);
- unsigned long pfn = dma_addr >> shift;
- struct iova *iova = find_iova(iovad, pfn);
- size_t size;
+ struct iova_domain *iovad = &cookie->iovad;
- if (WARN_ON(!iova))
- return;
+ /* The MSI case is only ever cleaning up its most recent allocation */
+ if (cookie->type == IOMMU_DMA_MSI_COOKIE)
+ cookie->msi_iova -= size;
+ else
+ free_iova_fast(iovad, iova_pfn(iovad, iova),
+ size >> iova_shift(iovad));
+}
+
+static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+ size_t size)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, dma_addr);
- size = iova_size(iova) << shift;
- size -= iommu_unmap(domain, pfn << shift, size);
- /* ...and if we can't, then something is horribly, horribly wrong */
- WARN_ON(size > 0);
- __free_iova(iovad, iova);
+ dma_addr -= iova_off;
+ size = iova_align(iovad, size + iova_off);
+
+ WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+ iommu_dma_free_iova(cookie, dma_addr, size);
}
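A quick worked example of the rounding done in iommu_dma_alloc_iova() above: a five-page request is below the rcache ceiling, so it is padded to eight pages. When it is later released through iommu_dma_free_iova() with the unrounded size, the IOVA rcache indexes the entry by the order of that size (order_base_2(5) == 3, assuming the iova.c rcache behaviour), so it lands back in the same eight-page bucket it was allocated from; without the padding, a five-page region could be cached in that bucket and later handed out to satisfy a genuine eight-page request.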
static void __iommu_dma_free_pages(struct page **pages, int count)
@@ -409,7 +498,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
*handle = DMA_ERROR_CODE;
}
@@ -437,11 +526,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct page **pages;
struct sg_table sgt;
- dma_addr_t dma_addr;
+ dma_addr_t iova;
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
*handle = DMA_ERROR_CODE;
@@ -461,11 +550,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (!pages)
return NULL;
- iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+ size = iova_align(iovad, size);
+ iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
- size = iova_align(iovad, size);
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
@@ -481,19 +570,18 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
sg_miter_stop(&miter);
}
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
+ if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
< size)
goto out_free_sg;
- *handle = dma_addr;
+ *handle = iova;
sg_free_table(&sgt);
return pages;
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
- __free_iova(iovad, iova);
+ iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
@@ -527,22 +615,25 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot)
{
- dma_addr_t dma_addr;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
- size_t iova_off = iova_offset(iovad, phys);
- size_t len = iova_align(iovad, size + iova_off);
- struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ size_t iova_off = 0;
+ dma_addr_t iova;
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
+ iova_off = iova_offset(&cookie->iovad, phys);
+ size = iova_align(&cookie->iovad, size + iova_off);
+ }
+
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
return DMA_ERROR_CODE;
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
- __free_iova(iovad, iova);
+ if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ iommu_dma_free_iova(cookie, iova, size);
return DMA_ERROR_CODE;
}
- return dma_addr + iova_off;
+ return iova + iova_off;
}
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -554,7 +645,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}
/*
@@ -643,10 +734,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int prot)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
- dma_addr_t dma_addr;
+ dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
int i;
@@ -690,7 +781,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
- iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova)
goto out_restore_sg;
@@ -698,14 +789,13 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- dma_addr = iova_dma_addr(iovad, iova);
- if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
+ if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
- return __finalise_sg(dev, sg, nents, dma_addr);
+ return __finalise_sg(dev, sg, nents, iova);
out_free_iova:
- __free_iova(iovad, iova);
+ iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
__invalidate_sg(sg, nents);
return 0;
@@ -714,11 +804,21 @@ out_restore_sg:
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
+ dma_addr_t start, end;
+ struct scatterlist *tmp;
+ int i;
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
*/
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
+ start = sg_dma_address(sg);
+ for_each_sg(sg_next(sg), tmp, nents - 1, i) {
+ if (sg_dma_len(tmp) == 0)
+ break;
+ sg = tmp;
+ }
+ end = sg_dma_address(sg) + sg_dma_len(sg);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
@@ -731,7 +831,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -744,8 +844,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
- struct iova_domain *iovad = cookie_iovad(domain);
- struct iova *iova;
+ dma_addr_t iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
size_t size = cookie_msi_granule(cookie);
@@ -758,29 +857,16 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- msi_page->phys = msi_addr;
- if (iovad) {
- iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
- if (!iova)
- goto out_free_page;
- msi_page->iova = iova_dma_addr(iovad, iova);
- } else {
- msi_page->iova = cookie->msi_iova;
- cookie->msi_iova += size;
- }
-
- if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
- goto out_free_iova;
+ iova = __iommu_dma_map(dev, msi_addr, size, prot);
+ if (iommu_dma_mapping_error(dev, iova))
+ goto out_free_page;
INIT_LIST_HEAD(&msi_page->list);
+ msi_page->phys = msi_addr;
+ msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
-out_free_iova:
- if (iovad)
- __free_iova(iovad, iova);
- else
- cookie->msi_iova -= size;
out_free_page:
kfree(msi_page);
return NULL;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 36e3f430d265..cbf7763d8091 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -311,7 +311,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
((void *)drhd) + drhd->header.length,
dmaru->segment,
dmaru->devices, dmaru->devices_cnt);
- if (ret != 0)
+ if (ret)
break;
}
if (ret >= 0)
@@ -391,7 +391,7 @@ static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
- int ret = 0;
+ int ret;
drhd = (struct acpi_dmar_hardware_unit *)header;
dmaru = dmar_find_dmaru(drhd);
@@ -551,17 +551,16 @@ static int __init dmar_table_detect(void)
status = AE_NOT_FOUND;
}
- return (ACPI_SUCCESS(status) ? 1 : 0);
+ return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
size_t len, struct dmar_res_callback *cb)
{
- int ret = 0;
struct acpi_dmar_header *iter, *next;
struct acpi_dmar_header *end = ((void *)start) + len;
- for (iter = start; iter < end && ret == 0; iter = next) {
+ for (iter = start; iter < end; iter = next) {
next = (void *)iter + iter->length;
if (iter->length == 0) {
/* Avoid looping forever on bad ACPI tables */
@@ -570,8 +569,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
} else if (next > end) {
/* Avoid passing table end */
pr_warn(FW_BUG "Record passes table end\n");
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
if (cb->print_entry)
@@ -582,15 +580,19 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
pr_debug("Unknown DMAR structure type %d\n",
iter->type);
} else if (cb->cb[iter->type]) {
+ int ret;
+
ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
+ if (ret)
+ return ret;
} else if (!cb->ignore_unhandled) {
pr_warn("No handler for DMAR structure type %d\n",
iter->type);
- ret = -EINVAL;
+ return -EINVAL;
}
}
- return ret;
+ return 0;
}
static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
@@ -607,8 +609,8 @@ static int __init
parse_dmar_table(void)
{
struct acpi_table_dmar *dmar;
- int ret = 0;
int drhd_count = 0;
+ int ret;
struct dmar_res_callback cb = {
.print_entry = true,
.ignore_unhandled = true,
@@ -891,17 +893,17 @@ int __init detect_intel_iommu(void)
down_write(&dmar_global_lock);
ret = dmar_table_detect();
- if (ret)
- ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
- &validate_drhd_cb);
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ if (!ret)
+ ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+ &validate_drhd_cb);
+ if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
}
#ifdef CONFIG_X86
- if (ret)
+ if (!ret)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
@@ -911,10 +913,9 @@ int __init detect_intel_iommu(void)
}
up_write(&dmar_global_lock);
- return ret ? 1 : -ENODEV;
+ return ret ? ret : 1;
}
-
static void unmap_iommu(struct intel_iommu *iommu)
{
iounmap(iommu->reg);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c01bfcdb2383..2395478dde75 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -171,6 +171,9 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_V5_PT_BASE_PFN 0x00C
#define REG_V5_MMU_FLUSH_ALL 0x010
#define REG_V5_MMU_FLUSH_ENTRY 0x014
+#define REG_V5_MMU_FLUSH_RANGE 0x018
+#define REG_V5_MMU_FLUSH_START 0x020
+#define REG_V5_MMU_FLUSH_END 0x024
#define REG_V5_INT_STATUS 0x060
#define REG_V5_INT_CLEAR 0x064
#define REG_V5_FAULT_AR_VA 0x070
@@ -319,14 +322,23 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
{
unsigned int i;
- for (i = 0; i < num_inv; i++) {
- if (MMU_MAJ_VER(data->version) < 5)
+ if (MMU_MAJ_VER(data->version) < 5) {
+ for (i = 0; i < num_inv; i++) {
writel((iova & SPAGE_MASK) | 1,
data->sfrbase + REG_MMU_FLUSH_ENTRY);
- else
+ iova += SPAGE_SIZE;
+ }
+ } else {
+ if (num_inv == 1) {
writel((iova & SPAGE_MASK) | 1,
data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
- iova += SPAGE_SIZE;
+ } else {
+ writel((iova & SPAGE_MASK),
+ data->sfrbase + REG_V5_MMU_FLUSH_START);
+ writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
+ data->sfrbase + REG_V5_MMU_FLUSH_END);
+ writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
+ }
}
}
@@ -747,16 +759,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
goto err_counter;
/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
- for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
- domain->pgtable[i + 0] = ZERO_LV2LINK;
- domain->pgtable[i + 1] = ZERO_LV2LINK;
- domain->pgtable[i + 2] = ZERO_LV2LINK;
- domain->pgtable[i + 3] = ZERO_LV2LINK;
- domain->pgtable[i + 4] = ZERO_LV2LINK;
- domain->pgtable[i + 5] = ZERO_LV2LINK;
- domain->pgtable[i + 6] = ZERO_LV2LINK;
- domain->pgtable[i + 7] = ZERO_LV2LINK;
- }
+ for (i = 0; i < NUM_LV1ENTRIES; i++)
+ domain->pgtable[i] = ZERO_LV2LINK;
handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
DMA_TO_DEVICE);
diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h
index aab723f91f12..c3434f29c967 100644
--- a/drivers/iommu/fsl_pamu.h
+++ b/drivers/iommu/fsl_pamu.h
@@ -20,6 +20,7 @@
#define __FSL_PAMU_H
#include <linux/iommu.h>
+#include <linux/pci.h>
#include <asm/fsl_pamu_stash.h>
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d412a313a372..fc2765ccdb57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -183,6 +183,7 @@ static int rwbf_quirk;
* (used when kernel is launched w/ TXT)
*/
static int force_on = 0;
+int intel_iommu_tboot_noforce;
/*
* 0: Present
@@ -607,6 +608,10 @@ static int __init intel_iommu_setup(char *str)
"Intel-IOMMU: enable pre-production PASID support\n");
intel_iommu_pasid28 = 1;
iommu_identity_mapping |= IDENTMAP_GFX;
+ } else if (!strncmp(str, "tboot_noforce", 13)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
+ intel_iommu_tboot_noforce = 1;
}
str += strcspn(str, ",");
@@ -2050,11 +2055,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
- if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+ if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ }
}
pgd = domain->pgd;
@@ -4730,6 +4738,15 @@ static int intel_iommu_cpu_dead(unsigned int cpu)
return 0;
}
+static void intel_disable_iommus(void)
+{
+ struct intel_iommu *iommu = NULL;
+ struct dmar_drhd_unit *drhd;
+
+ for_each_iommu(iommu, drhd)
+ iommu_disable_translation(iommu);
+}
+
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
return container_of(dev, struct intel_iommu, iommu.dev);
@@ -4840,8 +4857,28 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
- if (no_iommu || dmar_disabled)
+ if (no_iommu || dmar_disabled) {
+ /*
+ * We exit the function here to ensure IOMMU's remapping and
+ * mempool aren't setup, which means that the IOMMU's PMRs
+ * won't be disabled via the call to init_dmars(). So disable
+ * it explicitly here. The PMRs were setup by tboot prior to
+ * calling SENTER, but the kernel is expected to reset/tear
+ * down the PMRs.
+ */
+ if (intel_iommu_tboot_noforce) {
+ for_each_iommu(iommu, drhd)
+ iommu_disable_protect_mem_regions(iommu);
+ }
+
+ /*
+ * Make sure the IOMMUs are switched off, even when we
+ * boot into a kexec kernel and the previous kernel left
+ * them enabled
+ */
+ intel_disable_iommus();
goto out_free_dmar;
+ }
if (list_empty(&dmar_rmrr_units))
pr_info("No RMRR found\n");
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index ac596928f6b4..a190cbd76ef7 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -408,14 +408,6 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
size_t size;
u64 irta;
- if (!is_kdump_kernel()) {
- pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
- iommu->name);
- clear_ir_pre_enabled(iommu);
- iommu_disable_irq_remapping(iommu);
- return -EINVAL;
- }
-
/* Check whether the old ir-table has the same size as ours */
irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
@@ -567,7 +559,12 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
init_ir_status(iommu);
if (ir_pre_enabled(iommu)) {
- if (iommu_load_old_irte(iommu))
+ if (!is_kdump_kernel()) {
+ pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
+ iommu->name);
+ clear_ir_pre_enabled(iommu);
+ iommu_disable_irq_remapping(iommu);
+ } else if (iommu_load_old_irte(iommu))
pr_err("Failed to copy IR table for %s from previous kernel\n",
iommu->name);
else
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f9bc6ebb8140..6e5df5e0a3bd 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -74,7 +74,7 @@
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d) \
- (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
+ (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
/* Page table bits */
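The widening to 1ULL matters because the shift amount can exceed 31: with a 4KB granule and 8-byte PTEs, for example, bits_per_level is 9 and ilog2(sizeof(arm_lpae_iopte)) is 3, so evaluating the macro at level 0 of a four-level table gives a shift of 3 + 4 * 9 = 39, a 512GB block size that overflows (and is undefined behaviour for) a plain int 1 << n.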
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3b67144dead2..cf7ca7e70777 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -36,6 +36,7 @@
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
+static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -112,6 +113,18 @@ static int __iommu_attach_group(struct iommu_domain *domain,
static void __iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group);
+static int __init iommu_set_def_domain_type(char *str)
+{
+ bool pt;
+
+ if (!str || strtobool(str, &pt))
+ return -EINVAL;
+
+ iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+ return 0;
+}
+early_param("iommu.passthrough", iommu_set_def_domain_type);
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1015,10 +1028,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
* IOMMU driver.
*/
if (!group->default_domain) {
- group->default_domain = __iommu_domain_alloc(dev->bus,
- IOMMU_DOMAIN_DMA);
+ struct iommu_domain *dom;
+
+ dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
+ if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ }
+
+ group->default_domain = dom;
if (!group->domain)
- group->domain = group->default_domain;
+ group->domain = dom;
}
ret = iommu_group_add_device(group, dev);
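Two behaviours combine in this hunk: the boot-time iommu.passthrough switch selects the preferred default domain type, and the allocation above quietly falls back to IOMMU_DOMAIN_DMA when the driver cannot provide an identity domain. A short sketch of the mapping (boot-line spellings follow strtobool(); only =0/=1 are shown to stay on safe ground):

        /*
         *   iommu.passthrough=1  ->  iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY
         *   iommu.passthrough=0  ->  iommu_def_domain_type = IOMMU_DOMAIN_DMA
         *   (unset)              ->  IOMMU_DOMAIN_DMA, as before
         *
         * If __iommu_domain_alloc(bus, IOMMU_DOMAIN_IDENTITY) returns NULL,
         * the group still ends up with a translated IOMMU_DOMAIN_DMA default,
         * with the dev_warn() above noting the fallback.
         */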
@@ -1083,8 +1105,12 @@ static int iommu_bus_notifier(struct notifier_block *nb,
* result in ADD/DEL notifiers to group->notifier
*/
if (action == BUS_NOTIFY_ADD_DEVICE) {
- if (ops->add_device)
- return ops->add_device(dev);
+ if (ops->add_device) {
+ int ret;
+
+ ret = ops->add_device(dev);
+ return (ret) ? NOTIFY_DONE : NOTIFY_OK;
+ }
} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
if (ops->remove_device && dev->iommu_group) {
ops->remove_device(dev);
@@ -1652,6 +1678,48 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users, that are
+ * interested in such events, to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - restarting the faulting device, if required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
+int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags)
+{
+ int ret = -ENOSYS;
+
+ /*
+ * if upper layers showed interest and installed a fault handler,
+ * invoke it.
+ */
+ if (domain->handler)
+ ret = domain->handler(domain, dev, iova, flags,
+ domain->handler_token);
+
+ trace_io_page_fault(dev, iova, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(report_iommu_fault);
+
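Since this kernel-doc is currently the only in-tree description of the reporting path, a minimal end-to-end sketch may help. iommu_set_fault_handler() and the handler prototype are the existing API; the handler itself and its names are invented for illustration:

        #include <linux/device.h>
        #include <linux/iommu.h>

        /*
         * Hypothetical upper-layer handler: log the fault, then ask the IOMMU
         * driver to apply its default behaviour by returning -ENOSYS.
         */
        static int example_fault_handler(struct iommu_domain *domain,
                                         struct device *dev, unsigned long iova,
                                         int flags, void *token)
        {
                dev_err(dev, "IOMMU %s fault at 0x%08lx\n",
                        (flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
                return -ENOSYS;
        }

        static void example_install_handler(struct iommu_domain *domain)
        {
                /* The last argument is an opaque token handed back to the handler. */
                iommu_set_fault_handler(domain, example_fault_handler, NULL);
        }

The low-level driver's IRQ path then calls report_iommu_fault(domain, dev, iova, flags) and applies its own default handling when it gets -ENOSYS back, which is how existing callers such as the OMAP and Rockchip drivers already use it.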
static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7268a14184f..5c88ba70e4e0 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -100,6 +100,34 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
}
}
+/* Insert the iova into domain rbtree by holding writer lock */
+static void
+iova_insert_rbtree(struct rb_root *root, struct iova *iova,
+ struct rb_node *start)
+{
+ struct rb_node **new, *parent = NULL;
+
+ new = (start) ? &start : &(root->rb_node);
+ /* Figure out where to put new node */
+ while (*new) {
+ struct iova *this = rb_entry(*new, struct iova, node);
+
+ parent = *new;
+
+ if (iova->pfn_lo < this->pfn_lo)
+ new = &((*new)->rb_left);
+ else if (iova->pfn_lo > this->pfn_lo)
+ new = &((*new)->rb_right);
+ else {
+ WARN_ON(1); /* this should not happen */
+ return;
+ }
+ }
+ /* Add new node and rebalance tree. */
+ rb_link_node(&iova->node, parent, new);
+ rb_insert_color(&iova->node, root);
+}
+
/*
* Computes the padding size required, to make the start address
* naturally aligned on the power-of-two order of its size
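A note on the new 'start' parameter above: the allocation fast path already knows an adjacent node from its right-to-left scan, so it descends from there rather than from the root, while callers with no positional hint pass NULL. The two call shapes, as they appear further down in this file:

        iova_insert_rbtree(&iovad->rbroot, new, prev);   /* alloc path: hinted start */
        iova_insert_rbtree(&iovad->rbroot, iova, NULL);  /* reserve/split paths: from the root */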
@@ -138,7 +166,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
break; /* found a free slot */
}
adjust_limit_pfn:
- limit_pfn = curr_iova->pfn_lo - 1;
+ limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
move_left:
prev = curr;
curr = rb_prev(curr);
@@ -157,35 +185,8 @@ move_left:
new->pfn_lo = limit_pfn - (size + pad_size) + 1;
new->pfn_hi = new->pfn_lo + size - 1;
- /* Insert the new_iova into domain rbtree by holding writer lock */
- /* Add new node and rebalance tree. */
- {
- struct rb_node **entry, *parent = NULL;
-
- /* If we have 'prev', it's a valid place to start the
- insertion. Otherwise, start from the root. */
- if (prev)
- entry = &prev;
- else
- entry = &iovad->rbroot.rb_node;
-
- /* Figure out where to put new node */
- while (*entry) {
- struct iova *this = rb_entry(*entry, struct iova, node);
- parent = *entry;
-
- if (new->pfn_lo < this->pfn_lo)
- entry = &((*entry)->rb_left);
- else if (new->pfn_lo > this->pfn_lo)
- entry = &((*entry)->rb_right);
- else
- BUG(); /* this should not happen */
- }
-
- /* Add new node and rebalance tree. */
- rb_link_node(&new->node, parent, entry);
- rb_insert_color(&new->node, &iovad->rbroot);
- }
+ /* If we have 'prev', it's a valid place to start the insertion. */
+ iova_insert_rbtree(&iovad->rbroot, new, prev);
__cached_rbnode_insert_update(iovad, saved_pfn, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
@@ -194,28 +195,6 @@ move_left:
return 0;
}
-static void
-iova_insert_rbtree(struct rb_root *root, struct iova *iova)
-{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
- /* Figure out where to put new node */
- while (*new) {
- struct iova *this = rb_entry(*new, struct iova, node);
-
- parent = *new;
-
- if (iova->pfn_lo < this->pfn_lo)
- new = &((*new)->rb_left);
- else if (iova->pfn_lo > this->pfn_lo)
- new = &((*new)->rb_right);
- else
- BUG(); /* this should not happen */
- }
- /* Add new node and rebalance tree. */
- rb_link_node(&iova->node, parent, new);
- rb_insert_color(&iova->node, root);
-}
-
static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);
@@ -505,7 +484,7 @@ __insert_new_range(struct iova_domain *iovad,
iova = alloc_and_init_iova(pfn_lo, pfn_hi);
if (iova)
- iova_insert_rbtree(&iovad->rbroot, iova);
+ iova_insert_rbtree(&iovad->rbroot, iova, NULL);
return iova;
}
@@ -612,11 +591,11 @@ split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
rb_erase(&iova->node, &iovad->rbroot);
if (prev) {
- iova_insert_rbtree(&iovad->rbroot, prev);
+ iova_insert_rbtree(&iovad->rbroot, prev, NULL);
iova->pfn_lo = pfn_lo;
}
if (next) {
- iova_insert_rbtree(&iovad->rbroot, next);
+ iova_insert_rbtree(&iovad->rbroot, next, NULL);
iova->pfn_hi = pfn_hi;
}
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 19e010083408..bc1efbfb9ddf 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -431,9 +432,10 @@ err_release_mapping:
static int mtk_iommu_add_device(struct device *dev)
{
- struct iommu_group *group;
struct of_phandle_args iommu_spec;
struct of_phandle_iterator it;
+ struct mtk_iommu_data *data;
+ struct iommu_group *group;
int err;
of_for_each_phandle(&it, err, dev->of_node, "iommus",
@@ -450,6 +452,9 @@ static int mtk_iommu_add_device(struct device *dev)
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not a iommu client device */
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_link(&data->iommu, dev);
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
@@ -460,9 +465,14 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
+ struct mtk_iommu_data *data;
+
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_unlink(&data->iommu, dev);
+
iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -627,6 +637,17 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+ return ret;
+
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
@@ -637,6 +658,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
{
struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+ iommu_device_sysfs_remove(&data->iommu);
+ iommu_device_unregister(&data->iommu);
+
if (iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, NULL);
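The sysfs/registration boilerplate added here reappears almost verbatim in the omap and rockchip hunks below; for reference, the generic shape of the pattern is roughly the following (example_iommu_ops stands in for the driver's ops table, and unlike the minimal hunk above this sketch also unwinds the sysfs entry when registration fails):

        /* Probe path: expose the IOMMU instance to the core and to sysfs. */
        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, dev_name(dev));
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &example_iommu_ops);

        ret = iommu_device_register(&data->iommu);
        if (ret) {
                iommu_device_sysfs_remove(&data->iommu);
                return ret;
        }

        /* Remove path: mirror the probe-time calls. */
        iommu_device_unregister(&data->iommu);
        iommu_device_sysfs_remove(&data->iommu);

Per-device links are handled separately with iommu_device_link()/iommu_device_unlink() from the add_device/remove_device callbacks, as the hunks above show.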
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 2683e9fc0dcf..9f44ee8ea1bc 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -96,6 +96,49 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
+static bool of_iommu_driver_present(struct device_node *np)
+{
+ /*
+ * If the IOMMU still isn't ready by the time we reach init, assume
+ * it never will be. We don't want to defer indefinitely, nor attempt
+ * to dereference __iommu_of_table after it's been freed.
+ */
+ if (system_state > SYSTEM_BOOTING)
+ return false;
+
+ return of_match_node(&__iommu_of_table, np);
+}
+
+static const struct iommu_ops
+*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec)
+{
+ const struct iommu_ops *ops;
+ struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
+ int err;
+
+ ops = iommu_ops_from_fwnode(fwnode);
+ if ((ops && !ops->of_xlate) ||
+ (!ops && !of_iommu_driver_present(iommu_spec->np)))
+ return NULL;
+
+ err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+ if (err)
+ return ERR_PTR(err);
+ /*
+ * The otherwise-empty fwspec handily serves to indicate the specific
+ * IOMMU device we're waiting for, which will be useful if we ever get
+ * a proper probe-ordering dependency mechanism in future.
+ */
+ if (!ops)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ err = ops->of_xlate(dev, iommu_spec);
+ if (err)
+ return ERR_PTR(err);
+
+ return ops;
+}
+
static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
struct of_phandle_args *iommu_spec = data;
@@ -105,10 +148,11 @@ static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
}
static const struct iommu_ops
-*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np)
+*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np)
{
const struct iommu_ops *ops;
struct of_phandle_args iommu_spec;
+ int err;
/*
* Start by tracing the RID alias down the PCI topology as
@@ -123,56 +167,76 @@ static const struct iommu_ops
* bus into the system beyond, and which IOMMU it ends up at.
*/
iommu_spec.np = NULL;
- if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
- "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
- return NULL;
+ err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
+ "iommu-map-mask", &iommu_spec.np,
+ iommu_spec.args);
+ if (err)
+ return err == -ENODEV ? NULL : ERR_PTR(err);
- ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
- if (!ops || !ops->of_xlate ||
- iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
- ops->of_xlate(&pdev->dev, &iommu_spec))
- ops = NULL;
+ ops = of_iommu_xlate(&pdev->dev, &iommu_spec);
of_node_put(iommu_spec.np);
return ops;
}
-const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
+static const struct iommu_ops
+*of_platform_iommu_init(struct device *dev, struct device_node *np)
{
struct of_phandle_args iommu_spec;
- struct device_node *np;
const struct iommu_ops *ops = NULL;
int idx = 0;
- if (dev_is_pci(dev))
- return of_pci_iommu_configure(to_pci_dev(dev), master_np);
-
/*
* We don't currently walk up the tree looking for a parent IOMMU.
* See the `Notes:' section of
* Documentation/devicetree/bindings/iommu/iommu.txt
*/
- while (!of_parse_phandle_with_args(master_np, "iommus",
- "#iommu-cells", idx,
- &iommu_spec)) {
- np = iommu_spec.np;
- ops = iommu_ops_from_fwnode(&np->fwnode);
-
- if (!ops || !ops->of_xlate ||
- iommu_fwspec_init(dev, &np->fwnode, ops) ||
- ops->of_xlate(dev, &iommu_spec))
- goto err_put_node;
-
- of_node_put(np);
+ while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
+ idx, &iommu_spec)) {
+ ops = of_iommu_xlate(dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
idx++;
+ if (IS_ERR_OR_NULL(ops))
+ break;
}
return ops;
+}
+
+const struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np)
+{
+ const struct iommu_ops *ops;
+ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+
+ if (!master_np)
+ return NULL;
+
+ if (fwspec) {
+ if (fwspec->ops)
+ return fwspec->ops;
+
+ /* In the deferred case, start again from scratch */
+ iommu_fwspec_free(dev);
+ }
-err_put_node:
- of_node_put(np);
- return NULL;
+ if (dev_is_pci(dev))
+ ops = of_pci_iommu_init(to_pci_dev(dev), master_np);
+ else
+ ops = of_platform_iommu_init(dev, master_np);
+ /*
+ * If we have reason to believe the IOMMU driver missed the initial
+ * add_device callback for dev, replay it to get things in order.
+ */
+ if (!IS_ERR_OR_NULL(ops) && ops->add_device &&
+ dev->bus && !dev->iommu_group) {
+ int err = ops->add_device(dev);
+
+ if (err)
+ ops = ERR_PTR(err);
+ }
+
+ return ops;
}
static int __init of_iommu_init(void)
@@ -183,7 +247,7 @@ static int __init of_iommu_init(void)
for_each_matching_node_and_match(np, matches, &match) {
const of_iommu_init_fn init_fn = match->data;
- if (init_fn(np))
+ if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
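With these changes of_iommu_configure() has three distinct outcomes: a valid ops pointer, NULL when no usable IOMMU is described, and an ERR_PTR, notably -EPROBE_DEFER while the IOMMU driver has yet to probe. A hedged sketch of how a consumer on the of_dma_configure() side might handle that (the function name is invented and error propagation in the real caller may differ):

        static int example_of_dma_configure(struct device *dev)
        {
                const struct iommu_ops *ops;

                ops = of_iommu_configure(dev, dev->of_node);
                if (IS_ERR(ops))
                        return PTR_ERR(ops);    /* -EPROBE_DEFER retries the probe later */

                if (ops)
                        ;       /* device sits behind an IOMMU: set up IOMMU DMA ops */
                else
                        ;       /* no IOMMU upstream: use direct-mapped DMA */

                return 0;
        }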
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index e2583cce2cc1..95dfca36ccb9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -36,28 +36,14 @@
#include "omap-iopgtable.h"
#include "omap-iommu.h"
+static const struct iommu_ops omap_iommu_ops;
+
#define to_iommu(dev) \
((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-/**
- * struct omap_iommu_domain - omap iommu domain
- * @pgtable: the page table
- * @iommu_dev: an omap iommu device attached to this domain. only a single
- * iommu device can be attached for now.
- * @dev: Device using this domain.
- * @lock: domain lock, should be taken when attaching/detaching
- */
-struct omap_iommu_domain {
- u32 *pgtable;
- struct omap_iommu *iommu_dev;
- struct device *dev;
- spinlock_t lock;
- struct iommu_domain domain;
-};
-
#define MMU_LOCK_BASE_SHIFT 10
#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x) \
@@ -818,33 +804,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
return IRQ_NONE;
}
-static int device_match_by_alias(struct device *dev, void *data)
-{
- struct omap_iommu *obj = to_iommu(dev);
- const char *name = data;
-
- pr_debug("%s: %s %s\n", __func__, obj->name, name);
-
- return strcmp(obj->name, name) == 0;
-}
-
/**
* omap_iommu_attach() - attach iommu device to an iommu domain
- * @name: name of target omap iommu device
+ * @obj: target omap iommu device
* @iopgd: page table
**/
-static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
+static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
int err;
- struct device *dev;
- struct omap_iommu *obj;
-
- dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
- device_match_by_alias);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- obj = to_iommu(dev);
spin_lock(&obj->iommu_lock);
@@ -857,11 +824,13 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
spin_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
- return obj;
+
+ return 0;
err_enable:
spin_unlock(&obj->iommu_lock);
- return ERR_PTR(err);
+
+ return err;
}
/**
@@ -928,28 +897,26 @@ static int omap_iommu_probe(struct platform_device *pdev)
int irq;
struct omap_iommu *obj;
struct resource *res;
- struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *of = pdev->dev.of_node;
+ if (!of) {
+ pr_err("%s: only DT-based devices are supported\n", __func__);
+ return -ENODEV;
+ }
+
obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
if (!obj)
return -ENOMEM;
- if (of) {
- obj->name = dev_name(&pdev->dev);
- obj->nr_tlb_entries = 32;
- err = of_property_read_u32(of, "ti,#tlb-entries",
- &obj->nr_tlb_entries);
- if (err && err != -EINVAL)
- return err;
- if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
- return -EINVAL;
- if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
- obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
- } else {
- obj->nr_tlb_entries = pdata->nr_tlb_entries;
- obj->name = pdata->name;
- }
+ obj->name = dev_name(&pdev->dev);
+ obj->nr_tlb_entries = 32;
+ err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
+ if (err && err != -EINVAL)
+ return err;
+ if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
+ return -EINVAL;
+ if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
+ obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
obj->dev = &pdev->dev;
obj->ctx = (void *)obj + sizeof(*obj);
@@ -976,19 +943,46 @@ static int omap_iommu_probe(struct platform_device *pdev)
return err;
platform_set_drvdata(pdev, obj);
+ obj->group = iommu_group_alloc();
+ if (IS_ERR(obj->group))
+ return PTR_ERR(obj->group);
+
+ err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name);
+ if (err)
+ goto out_group;
+
+ iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
+
+ err = iommu_device_register(&obj->iommu);
+ if (err)
+ goto out_sysfs;
+
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
omap_iommu_debugfs_add(obj);
dev_info(&pdev->dev, "%s registered\n", obj->name);
+
return 0;
+
+out_sysfs:
+ iommu_device_sysfs_remove(&obj->iommu);
+out_group:
+ iommu_group_put(obj->group);
+ return err;
}
static int omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
+ iommu_group_put(obj->group);
+ obj->group = NULL;
+
+ iommu_device_sysfs_remove(&obj->iommu);
+ iommu_device_unregister(&obj->iommu);
+
omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1077,11 +1071,11 @@ static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
- struct omap_iommu *oiommu;
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *oiommu;
int ret = 0;
- if (!arch_data || !arch_data->name) {
+ if (!arch_data || !arch_data->iommu_dev) {
dev_err(dev, "device doesn't have an associated iommu\n");
return -EINVAL;
}
@@ -1095,15 +1089,16 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out;
}
+ oiommu = arch_data->iommu_dev;
+
/* get a handle to and enable the omap iommu */
- oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
- if (IS_ERR(oiommu)) {
- ret = PTR_ERR(oiommu);
+ ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
+ if (ret) {
dev_err(dev, "can't get omap iommu: %d\n", ret);
goto out;
}
- omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+ omap_domain->iommu_dev = oiommu;
omap_domain->dev = dev;
oiommu->domain = domain;
@@ -1116,7 +1111,6 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
struct device *dev)
{
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
@@ -1128,7 +1122,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_iommu_detach(oiommu);
- omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+ omap_domain->iommu_dev = NULL;
omap_domain->dev = NULL;
oiommu->domain = NULL;
}
@@ -1232,8 +1226,11 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
static int omap_iommu_add_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data;
+ struct omap_iommu *oiommu;
+ struct iommu_group *group;
struct device_node *np;
struct platform_device *pdev;
+ int ret;
/*
* Allocate the archdata iommu structure for DT-based devices.
@@ -1254,15 +1251,41 @@ static int omap_iommu_add_device(struct device *dev)
return -EINVAL;
}
+ oiommu = platform_get_drvdata(pdev);
+ if (!oiommu) {
+ of_node_put(np);
+ return -EINVAL;
+ }
+
arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
if (!arch_data) {
of_node_put(np);
return -ENOMEM;
}
- arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
+ ret = iommu_device_link(&oiommu->iommu, dev);
+ if (ret) {
+ kfree(arch_data);
+ of_node_put(np);
+ return ret;
+ }
+
+ arch_data->iommu_dev = oiommu;
dev->archdata.iommu = arch_data;
+ /*
+ * IOMMU group initialization calls into omap_iommu_device_group, which
+ * needs a valid dev->archdata.iommu pointer
+ */
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group)) {
+ iommu_device_unlink(&oiommu->iommu, dev);
+ dev->archdata.iommu = NULL;
+ kfree(arch_data);
+ return PTR_ERR(group);
+ }
+ iommu_group_put(group);
+
of_node_put(np);
return 0;
@@ -1275,8 +1298,23 @@ static void omap_iommu_remove_device(struct device *dev)
if (!dev->of_node || !arch_data)
return;
- kfree(arch_data->name);
+ iommu_device_unlink(&arch_data->iommu_dev->iommu, dev);
+ iommu_group_remove_device(dev);
+
+ dev->archdata.iommu = NULL;
kfree(arch_data);
+
+}
+
+static struct iommu_group *omap_iommu_device_group(struct device *dev)
+{
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct iommu_group *group = NULL;
+
+ if (arch_data->iommu_dev)
+ group = arch_data->iommu_dev->group;
+
+ return group;
}
static const struct iommu_ops omap_iommu_ops = {
@@ -1290,6 +1328,7 @@ static const struct iommu_ops omap_iommu_ops = {
.iova_to_phys = omap_iommu_iova_to_phys,
.add_device = omap_iommu_add_device,
.remove_device = omap_iommu_remove_device,
+ .device_group = omap_iommu_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
@@ -1299,6 +1338,7 @@ static int __init omap_iommu_init(void)
const unsigned long flags = SLAB_HWCACHE_ALIGN;
size_t align = 1 << 10; /* L2 pagetable alignment */
struct device_node *np;
+ int ret;
np = of_find_matching_node(NULL, omap_iommu_of_match);
if (!np)
@@ -1312,11 +1352,25 @@ static int __init omap_iommu_init(void)
return -ENOMEM;
iopte_cachep = p;
- bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
-
omap_iommu_debugfs_init();
- return platform_driver_register(&omap_iommu_driver);
+ ret = platform_driver_register(&omap_iommu_driver);
+ if (ret) {
+ pr_err("%s: failed to register driver\n", __func__);
+ goto fail_driver;
+ }
+
+ ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+ if (ret)
+ goto fail_bus;
+
+ return 0;
+
+fail_bus:
+ platform_driver_unregister(&omap_iommu_driver);
+fail_driver:
+ kmem_cache_destroy(iopte_cachep);
+ return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */
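With arch_data->iommu_dev resolved at add_device time and a per-IOMMU group exported through omap_iommu_device_group(), a client such as omap3isp can stay entirely within the generic IOMMU API; a minimal sketch (client_dev stands for the client's own struct device):

        #include <linux/iommu.h>
        #include <linux/platform_device.h>

        static int example_client_attach(struct device *client_dev)
        {
                struct iommu_domain *domain;
                int ret;

                domain = iommu_domain_alloc(&platform_bus_type);
                if (!domain)
                        return -ENOMEM;

                ret = iommu_attach_device(domain, client_dev);
                if (ret)
                        iommu_domain_free(domain);

                return ret;
        }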
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 59628e5017b4..6e70515e6038 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -14,6 +14,7 @@
#define _OMAP_IOMMU_H
#include <linux/bitops.h>
+#include <linux/iommu.h>
#define for_each_iotlb_cr(obj, n, __i, cr) \
for (__i = 0; \
@@ -27,6 +28,23 @@ struct iotlb_entry {
u32 endian, elsz, mixed;
};
+/**
+ * struct omap_iommu_domain - omap iommu domain
+ * @pgtable: the page table
+ * @iommu_dev: an omap iommu device attached to this domain. only a single
+ * iommu device can be attached for now.
+ * @dev: Device using this domain.
+ * @lock: domain lock, should be taken when attaching/detaching
+ * @domain: generic domain handle used by iommu core code
+ */
+struct omap_iommu_domain {
+ u32 *pgtable;
+ struct omap_iommu *iommu_dev;
+ struct device *dev;
+ spinlock_t lock;
+ struct iommu_domain domain;
+};
+
struct omap_iommu {
const char *name;
void __iomem *regbase;
@@ -50,6 +68,22 @@ struct omap_iommu {
int has_bus_err_back;
u32 id;
+
+ struct iommu_device iommu;
+ struct iommu_group *group;
+};
+
+/**
+ * struct omap_iommu_arch_data - omap iommu private data
+ * @iommu_dev: handle of the iommu device
+ *
+ * This is an omap iommu private data object, which binds an iommu user
+ * to its iommu device. This object should be placed at the iommu user's
+ * dev_archdata so generic IOMMU API can be used without having to
+ * utilize omap-specific plumbing anymore.
+ */
+struct omap_iommu_arch_data {
+ struct omap_iommu *iommu_dev;
};
struct cr_regs {
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 9afcbf79f0b0..4ba48a26b389 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -90,6 +91,7 @@ struct rk_iommu {
void __iomem **bases;
int num_mmu;
int irq;
+ struct iommu_device iommu;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
};
@@ -1032,6 +1034,7 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group,
static int rk_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
+ struct rk_iommu *iommu;
int ret;
if (!rk_iommu_is_dev_iommu_master(dev))
@@ -1054,6 +1057,10 @@ static int rk_iommu_add_device(struct device *dev)
if (ret)
goto err_remove_device;
+ iommu = rk_iommu_from_dev(dev);
+ if (iommu)
+ iommu_device_link(&iommu->iommu, dev);
+
iommu_group_put(group);
return 0;
@@ -1067,9 +1074,15 @@ err_put_group:
static void rk_iommu_remove_device(struct device *dev)
{
+ struct rk_iommu *iommu;
+
if (!rk_iommu_is_dev_iommu_master(dev))
return;
+ iommu = rk_iommu_from_dev(dev);
+ if (iommu)
+ iommu_device_unlink(&iommu->iommu, dev);
+
iommu_group_remove_device(dev);
}
@@ -1117,7 +1130,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct rk_iommu *iommu;
struct resource *res;
int num_res = pdev->num_resources;
- int i;
+ int err, i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -1150,11 +1163,25 @@ static int rk_iommu_probe(struct platform_device *pdev)
return -ENXIO;
}
- return 0;
+ err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
+ if (err)
+ return err;
+
+ iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
+ err = iommu_device_register(&iommu->iommu);
+
+ return err;
}
static int rk_iommu_remove(struct platform_device *pdev)
{
+ struct rk_iommu *iommu = platform_get_drvdata(pdev);
+
+ if (iommu) {
+ iommu_device_sysfs_remove(&iommu->iommu);
+ iommu_device_unregister(&iommu->iommu);
+ }
+
return 0;
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 9305964250ac..eeb19f560a05 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -15,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>