Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c  526
1 file changed, 417 insertions(+), 109 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 68d43beccb7e..a98a7b27aca1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -15,8 +15,11 @@
* Shaohua Li <shaohua.li@intel.com>,
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
+ * Joerg Roedel <jroedel@suse.de>
*/
+#define pr_fmt(fmt) "DMAR: " fmt
+
#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
@@ -40,6 +43,7 @@
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
+#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -190,7 +194,29 @@ struct root_entry {
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
+/*
+ * Take a root_entry and return the Lower Context Table Pointer (LCTP)
+ * if marked present.
+ */
+static phys_addr_t root_entry_lctp(struct root_entry *re)
+{
+ if (!(re->lo & 1))
+ return 0;
+
+ return re->lo & VTD_PAGE_MASK;
+}
+
+/*
+ * Take a root_entry and return the Upper Context Table Pointer (UCTP)
+ * if marked present.
+ */
+static phys_addr_t root_entry_uctp(struct root_entry *re)
+{
+ if (!(re->hi & 1))
+ return 0;
+ return re->hi & VTD_PAGE_MASK;
+}
/*
* low 64 bits:
* 0: present
@@ -207,10 +233,38 @@ struct context_entry {
u64 hi;
};
-static inline bool context_present(struct context_entry *context)
+static inline void context_clear_pasid_enable(struct context_entry *context)
+{
+ context->lo &= ~(1ULL << 11);
+}
+
+static inline bool context_pasid_enabled(struct context_entry *context)
+{
+ return !!(context->lo & (1ULL << 11));
+}
+
+static inline void context_set_copied(struct context_entry *context)
+{
+ context->hi |= (1ull << 3);
+}
+
+static inline bool context_copied(struct context_entry *context)
+{
+ return !!(context->hi & (1ULL << 3));
+}
+
+static inline bool __context_present(struct context_entry *context)
{
return (context->lo & 1);
}
+
+static inline bool context_present(struct context_entry *context)
+{
+ return context_pasid_enabled(context) ?
+ __context_present(context) :
+ __context_present(context) && !context_copied(context);
+}
+
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
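
For reference, a stand-alone sketch (ordinary user-space C, not part of the patch) of how the copied marker interacts with the new context_present() above: bit 11 of the low qword is the PASID-enable bit, and bit 3 of the high qword (bit 67 of the 128-bit entry) is the copied marker, so a copied entry reads as non-present to software unless PASIDs are enabled.

    /* Sketch only: mirrors the helpers added in the hunk above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ce { uint64_t lo, hi; };

    static bool pasid_enabled(struct ce *c) { return c->lo & (1ULL << 11); }
    static bool copied(struct ce *c)        { return c->hi & (1ULL << 3);  }
    static bool raw_present(struct ce *c)   { return c->lo & 1;            }

    /* Same logic as context_present(): with PASIDs off, bit 67 is free
     * for software, so a copied entry stays hidden from the new kernel. */
    static bool present(struct ce *c)
    {
            return pasid_enabled(c) ? raw_present(c)
                                    : raw_present(c) && !copied(c);
    }

    int main(void)
    {
            struct ce e = { .lo = 1, .hi = 1ULL << 3 }; /* present + copied */

            printf("PASIDs off -> present: %d\n", present(&e)); /* 0 */
            e.lo |= 1ULL << 11;
            printf("PASIDs on  -> present: %d\n", present(&e)); /* 1 */
            return 0;
    }
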
@@ -247,6 +301,11 @@ static inline void context_set_domain_id(struct context_entry *context,
context->hi |= (value & ((1 << 16) - 1)) << 8;
}
+static inline int context_domain_id(struct context_entry *c)
+{
+	return (c->hi >> 8) & 0xffff;
+}
+
static inline void context_clear_entry(struct context_entry *context)
{
context->lo = 0;
@@ -422,6 +481,14 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations — the ones with
+ * PASID support on bit 28 — have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+ ecap_pasid(iommu->ecap))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -432,6 +499,25 @@ static LIST_HEAD(device_domain_list);
static const struct iommu_ops intel_iommu_ops;
+static bool translation_pre_enabled(struct intel_iommu *iommu)
+{
+ return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
+}
+
+static void clear_translation_pre_enabled(struct intel_iommu *iommu)
+{
+ iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
+}
+
+static void init_translation_status(struct intel_iommu *iommu)
+{
+ u32 gsts;
+
+ gsts = readl(iommu->reg + DMAR_GSTS_REG);
+ if (gsts & DMA_GSTS_TES)
+ iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
+}
+
/* Convert generic 'struct iommu_domain' to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
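
A stand-alone sketch (user-space C, with a made-up register value) of the TES probe that init_translation_status() performs above; TES is bit 31 of the global status register:

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_GSTS_TES (1u << 31) /* Translation Enable Status */

    int main(void)
    {
            /* Stand-in for readl(iommu->reg + DMAR_GSTS_REG) */
            uint32_t gsts = 0x80000000;

            if (gsts & DMA_GSTS_TES)
                    printf("translation pre-enabled (e.g. by a crashed kernel)\n");
            return 0;
    }
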
@@ -445,26 +531,26 @@ static int __init intel_iommu_setup(char *str)
while (*str) {
if (!strncmp(str, "on", 2)) {
dmar_disabled = 0;
- printk(KERN_INFO "Intel-IOMMU: enabled\n");
+ pr_info("IOMMU enabled\n");
} else if (!strncmp(str, "off", 3)) {
dmar_disabled = 1;
- printk(KERN_INFO "Intel-IOMMU: disabled\n");
+ pr_info("IOMMU disabled\n");
} else if (!strncmp(str, "igfx_off", 8)) {
dmar_map_gfx = 0;
- printk(KERN_INFO
- "Intel-IOMMU: disable GFX device mapping\n");
+ pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
- printk(KERN_INFO
- "Intel-IOMMU: Forcing DAC for PCI devices\n");
+ pr_info("Forcing DAC for PCI devices\n");
dmar_forcedac = 1;
} else if (!strncmp(str, "strict", 6)) {
- printk(KERN_INFO
- "Intel-IOMMU: disable batched IOTLB flush\n");
+ pr_info("Disable batched IOTLB flush\n");
intel_iommu_strict = 1;
} else if (!strncmp(str, "sp_off", 6)) {
- printk(KERN_INFO
- "Intel-IOMMU: disable supported super page\n");
+ pr_info("Disable supported super page\n");
intel_iommu_superpage = 0;
+ } else if (!strncmp(str, "ecs_off", 7)) {
+		pr_info("Disable extended context table support\n");
+ intel_iommu_ecs = 0;
}
str += strcspn(str, ",");
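
The parser above walks comma-separated options with strcspn(); a stand-alone sketch (sample input, not kernel code) of the same loop, including the comma-skip that sits just below this hunk:

    #include <stdio.h>
    #include <string.h>

    static void parse(char *str)
    {
            while (*str) {
                    if (!strncmp(str, "ecs_off", 7))
                            printf("Disable extended context table support\n");
                    else if (!strncmp(str, "on", 2))
                            printf("IOMMU enabled\n");

                    str += strcspn(str, ","); /* jump to next option */
                    while (*str == ',')
                            str++;
            }
    }

    int main(void)
    {
            char opts[] = "on,ecs_off"; /* as in intel_iommu=on,ecs_off */

            parse(opts);
            return 0;
    }
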
@@ -669,7 +755,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
struct context_entry *context;
u64 *entry;
- if (ecap_ecs(iommu->ecap)) {
+ if (ecs_enabled(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
entry = &root->hi;
@@ -696,6 +782,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
return &context[devfn];
}
+static int iommu_dummy(struct device *dev)
+{
+ return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -705,6 +796,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
u16 segment = 0;
int i;
+ if (iommu_dummy(dev))
+ return NULL;
+
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
segment = pci_domain_nr(pdev->bus);
@@ -798,7 +892,7 @@ static void free_context_table(struct intel_iommu *iommu)
if (context)
free_pgtable_page(context);
- if (!ecap_ecs(iommu->ecap))
+ if (!ecs_enabled(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1112,7 +1206,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
if (!root) {
- pr_err("IOMMU: allocating root entry for %s failed\n",
+ pr_err("Allocating root entry for %s failed\n",
iommu->name);
return -ENOMEM;
}
@@ -1133,7 +1227,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
unsigned long flag;
addr = virt_to_phys(iommu->root_entry);
- if (ecap_ecs(iommu->ecap))
+ if (ecs_enabled(iommu))
addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1250,9 +1344,9 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
/* check IOTLB invalidation granularity */
if (DMA_TLB_IAIG(val) == 0)
- printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
+ pr_err("Flush IOTLB failed\n");
if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
- pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
+ pr_debug("TLB flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
}
@@ -1423,8 +1517,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;
ndomains = cap_ndoms(iommu->cap);
- pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
- iommu->seq_id, ndomains);
+ pr_debug("%s: Number of Domains supported <%ld>\n",
+ iommu->name, ndomains);
nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
@@ -1434,15 +1528,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
*/
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
- pr_err("IOMMU%d: allocating domain id array failed\n",
- iommu->seq_id);
+ pr_err("%s: Allocating domain id array failed\n",
+ iommu->name);
return -ENOMEM;
}
iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
GFP_KERNEL);
if (!iommu->domains) {
- pr_err("IOMMU%d: allocating domain array failed\n",
- iommu->seq_id);
+ pr_err("%s: Allocating domain array failed\n",
+ iommu->name);
kfree(iommu->domain_ids);
iommu->domain_ids = NULL;
return -ENOMEM;
@@ -1547,7 +1641,7 @@ static int iommu_attach_domain(struct dmar_domain *domain,
num = __iommu_attach_domain(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
if (num < 0)
- pr_err("IOMMU: no free domain ids\n");
+ pr_err("%s: No free domain ids\n", iommu->name);
return num;
}
@@ -1639,7 +1733,7 @@ static int dmar_init_reserved_ranges(void)
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
IOVA_PFN(IOAPIC_RANGE_END));
if (!iova) {
- printk(KERN_ERR "Reserve IOAPIC range failed\n");
+ pr_err("Reserve IOAPIC range failed\n");
return -ENODEV;
}
@@ -1655,7 +1749,7 @@ static int dmar_init_reserved_ranges(void)
IOVA_PFN(r->start),
IOVA_PFN(r->end));
if (!iova) {
- printk(KERN_ERR "Reserve iova failed\n");
+ pr_err("Reserve iova failed\n");
return -ENODEV;
}
}
@@ -1702,7 +1796,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
sagaw = cap_sagaw(iommu->cap);
if (!test_bit(agaw, &sagaw)) {
/* hardware doesn't support it, choose a bigger one */
- pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
+ pr_debug("Hardware doesn't support agaw %d\n", agaw);
agaw = find_next_bit(&sagaw, 5, agaw);
if (agaw >= 5)
return -ENODEV;
@@ -1795,6 +1889,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
return 0;
}
+ context_clear_entry(context);
+
id = domain->id;
pgd = domain->pgd;
@@ -1803,7 +1899,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
id = iommu_attach_vm_domain(domain, iommu);
if (id < 0) {
spin_unlock_irqrestore(&iommu->lock, flags);
- pr_err("IOMMU: no free domain ids\n");
+ pr_err("%s: No free domain ids\n", iommu->name);
return -EFAULT;
}
}
@@ -2030,8 +2126,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
if (tmp) {
static int dumps = 5;
- printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
- iov_pfn, tmp, (unsigned long long)pteval);
+ pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+ iov_pfn, tmp, (unsigned long long)pteval);
if (dumps) {
dumps--;
debug_dma_dump_mappings(NULL);
@@ -2303,7 +2399,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
dma_to_mm_pfn(last_vpfn))) {
- printk(KERN_ERR "IOMMU: reserve iova failed\n");
+ pr_err("Reserving iova failed\n");
return -ENOMEM;
}
@@ -2336,15 +2432,14 @@ static int iommu_prepare_identity_map(struct device *dev,
range which is reserved in E820, so which didn't get set
up to start with in si_domain */
if (domain == si_domain && hw_pass_through) {
- printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
+ pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+ dev_name(dev), start, end);
return 0;
}
- printk(KERN_INFO
- "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- dev_name(dev), start, end);
-
+ pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+ dev_name(dev), start, end);
+
if (end < start) {
WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -2401,12 +2496,11 @@ static inline void iommu_prepare_isa(void)
if (!pdev)
return;
- printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
+ pr_info("Prepare 0-16MiB unity mapping for LPC\n");
ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
if (ret)
- printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
- "floppy might not work\n");
+ pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
pci_dev_put(pdev);
}
@@ -2450,7 +2544,7 @@ static int __init si_domain_init(int hw)
return -EFAULT;
}
- pr_debug("IOMMU: identity mapping domain is domain %d\n",
+ pr_debug("Identity mapping domain is domain %d\n",
si_domain->id);
if (hw)
@@ -2650,8 +2744,8 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
hw ? CONTEXT_TT_PASS_THROUGH :
CONTEXT_TT_MULTI_LEVEL);
if (!ret)
- pr_info("IOMMU: %s identity mapping for device %s\n",
- hw ? "hardware" : "software", dev_name(dev));
+ pr_info("%s identity mapping for device %s\n",
+ hw ? "Hardware" : "Software", dev_name(dev));
else if (ret == -ENODEV)
/* device not associated with an iommu */
ret = 0;
@@ -2669,10 +2763,6 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
int i;
int ret = 0;
- ret = si_domain_init(hw);
- if (ret)
- return -EFAULT;
-
for_each_pci_dev(pdev) {
ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
if (ret)
@@ -2686,7 +2776,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
if (dev->bus != &acpi_bus_type)
continue;
-
+
adev= to_acpi_device(dev);
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn, &adev->physical_node_list, node) {
@@ -2728,19 +2818,200 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- pr_info("IOMMU: %s using Register based invalidation\n",
+ pr_info("%s: Using Register based invalidation\n",
iommu->name);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
- pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+ pr_info("%s: Using Queued invalidation\n", iommu->name);
}
}
+static int copy_context_table(struct intel_iommu *iommu,
+ struct root_entry *old_re,
+ struct context_entry **tbl,
+ int bus, bool ext)
+{
+ struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
+ int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
+ phys_addr_t old_ce_phys;
+
+ tbl_idx = ext ? bus * 2 : bus;
+
+ for (devfn = 0; devfn < 256; devfn++) {
+ /* First calculate the correct index */
+ idx = (ext ? devfn * 2 : devfn) % 256;
+
+ if (idx == 0) {
+ /* First save what we may have and clean up */
+ if (new_ce) {
+ tbl[tbl_idx] = new_ce;
+ __iommu_flush_cache(iommu, new_ce,
+ VTD_PAGE_SIZE);
+ pos = 1;
+ }
+
+ if (old_ce)
+ iounmap(old_ce);
+
+ ret = 0;
+ if (devfn < 0x80)
+ old_ce_phys = root_entry_lctp(old_re);
+ else
+ old_ce_phys = root_entry_uctp(old_re);
+
+ if (!old_ce_phys) {
+ if (ext && devfn == 0) {
+ /* No LCTP, try UCTP */
+ devfn = 0x7f;
+ continue;
+ } else {
+ goto out;
+ }
+ }
+
+ ret = -ENOMEM;
+ old_ce = ioremap_cache(old_ce_phys, PAGE_SIZE);
+ if (!old_ce)
+ goto out;
+
+ new_ce = alloc_pgtable_page(iommu->node);
+ if (!new_ce)
+ goto out_unmap;
+
+ ret = 0;
+ }
+
+ /* Now copy the context entry */
+ ce = old_ce[idx];
+
+ if (!__context_present(&ce))
+ continue;
+
+ did = context_domain_id(&ce);
+ if (did >= 0 && did < cap_ndoms(iommu->cap))
+ set_bit(did, iommu->domain_ids);
+
+ /*
+ * We need a marker for copied context entries. This
+ * marker needs to work for the old format as well as
+ * for extended context entries.
+ *
+ * Bit 67 of the context entry is used. In the old
+ * format this bit is available to software, in the
+ * extended format it is the PGE bit, but PGE is ignored
+ * by HW if PASIDs are disabled (and thus still
+ * available).
+ *
+ * So disable PASIDs first and then mark the entry
+ * copied. This means that we don't copy PASID
+ * translations from the old kernel, but this is fine as
+ * faults there are not fatal.
+ */
+ context_clear_pasid_enable(&ce);
+ context_set_copied(&ce);
+
+ new_ce[idx] = ce;
+ }
+
+ tbl[tbl_idx + pos] = new_ce;
+
+ __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
+
+out_unmap:
+ iounmap(old_ce);
+
+out:
+ return ret;
+}
+
+static int copy_translation_tables(struct intel_iommu *iommu)
+{
+ struct context_entry **ctxt_tbls;
+ struct root_entry *old_rt;
+ phys_addr_t old_rt_phys;
+ int ctxt_table_entries;
+ unsigned long flags;
+ u64 rtaddr_reg;
+ int bus, ret;
+ bool new_ext, ext;
+
+ rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
+ ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
+ new_ext = !!ecap_ecs(iommu->ecap);
+
+ /*
+ * The RTT bit can only be changed when translation is disabled,
+ * but disabling translation means to open a window for data
+ * corruption. So bail out and don't copy anything if we would
+ * have to change the bit.
+ */
+ if (new_ext != ext)
+ return -EINVAL;
+
+ old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
+ if (!old_rt_phys)
+ return -EINVAL;
+
+ old_rt = ioremap_cache(old_rt_phys, PAGE_SIZE);
+ if (!old_rt)
+ return -ENOMEM;
+
+ /* This is too big for the stack - allocate it from slab */
+ ctxt_table_entries = ext ? 512 : 256;
+ ret = -ENOMEM;
+ ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
+ if (!ctxt_tbls)
+ goto out_unmap;
+
+ for (bus = 0; bus < 256; bus++) {
+ ret = copy_context_table(iommu, &old_rt[bus],
+ ctxt_tbls, bus, ext);
+ if (ret) {
+ pr_err("%s: Failed to copy context table for bus %d\n",
+ iommu->name, bus);
+ continue;
+ }
+ }
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ /* Context tables are copied, now write them to the root_entry table */
+ for (bus = 0; bus < 256; bus++) {
+ int idx = ext ? bus * 2 : bus;
+ u64 val;
+
+ if (ctxt_tbls[idx]) {
+ val = virt_to_phys(ctxt_tbls[idx]) | 1;
+ iommu->root_entry[bus].lo = val;
+ }
+
+ if (!ext || !ctxt_tbls[idx + 1])
+ continue;
+
+ val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
+ iommu->root_entry[bus].hi = val;
+ }
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ kfree(ctxt_tbls);
+
+ __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
+
+ ret = 0;
+
+out_unmap:
+ iounmap(old_rt);
+
+ return ret;
+}
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
+ bool copied_tables = false;
struct device *dev;
struct intel_iommu *iommu;
int i, ret;
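
The index arithmetic in copy_context_table() above is the subtle part: in extended mode each bus owns two context tables (hence bus * 2), and each 256-bit extended entry occupies two 128-bit context_entry slots (hence devfn * 2), wrapping at 256 so devfn 0x80 restarts at slot 0 of the upper table. A stand-alone sketch of that mapping:

    #include <stdio.h>

    int main(void)
    {
            const int bus = 3, ext = 1;
            int tbl_idx = ext ? bus * 2 : bus;

            for (int devfn = 0; devfn < 256; devfn += 0x40) {
                    int idx = (ext ? devfn * 2 : devfn) % 256;
                    /* devfn < 0x80 lands in the lower table (LCTP),
                     * devfn >= 0x80 in the upper one (UCTP). */
                    int table = tbl_idx + (devfn < 0x80 ? 0 : 1);

                    printf("devfn 0x%02x -> tbl[%d], slot %d\n",
                           devfn, table, idx);
            }
            return 0;
    }
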
@@ -2761,8 +3032,7 @@ static int __init init_dmars(void)
g_num_of_iommus++;
continue;
}
- printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
- DMAR_UNITS_SUPPORTED);
+ pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
}
/* Preallocate enough resources for IOMMU hot-addition */
@@ -2772,7 +3042,7 @@ static int __init init_dmars(void)
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
if (!g_iommus) {
- printk(KERN_ERR "Allocating global iommu array failed\n");
+ pr_err("Allocating global iommu array failed\n");
ret = -ENOMEM;
goto error;
}
@@ -2787,10 +3057,21 @@ static int __init init_dmars(void)
for_each_active_iommu(iommu, drhd) {
g_iommus[iommu->seq_id] = iommu;
+ intel_iommu_init_qi(iommu);
+
ret = iommu_init_domains(iommu);
if (ret)
goto free_iommu;
+ init_translation_status(iommu);
+
+ if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
+ iommu_disable_translation(iommu);
+ clear_translation_pre_enabled(iommu);
+ pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
+ iommu->name);
+ }
+
/*
* TBD:
* we could share the same root & context tables
@@ -2799,13 +3080,41 @@ static int __init init_dmars(void)
ret = iommu_alloc_root_entry(iommu);
if (ret)
goto free_iommu;
+
+ if (translation_pre_enabled(iommu)) {
+ pr_info("Translation already enabled - trying to copy translation structures\n");
+
+ ret = copy_translation_tables(iommu);
+ if (ret) {
+ /*
+ * We found the IOMMU with translation
+ * enabled - but failed to copy over the
+ * old root-entry table. Try to proceed
+ * by disabling translation now and
+ * allocating a clean root-entry table.
+ * This might cause DMAR faults, but
+ * probably the dump will still succeed.
+ */
+ pr_err("Failed to copy translation tables from previous kernel for %s\n",
+ iommu->name);
+ iommu_disable_translation(iommu);
+ clear_translation_pre_enabled(iommu);
+ } else {
+ pr_info("Copied translation tables from previous kernel for %s\n",
+ iommu->name);
+ copied_tables = true;
+ }
+ }
+
+ iommu_flush_write_buffer(iommu);
+ iommu_set_root_entry(iommu);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
}
- for_each_active_iommu(iommu, drhd)
- intel_iommu_init_qi(iommu);
-
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2813,9 +3122,24 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_GFX;
#endif
+ if (iommu_identity_mapping) {
+ ret = si_domain_init(hw_pass_through);
+ if (ret)
+ goto free_iommu;
+ }
+
check_tylersburg_isoch();
/*
+ * If we copied translations from a previous kernel in the kdump
+	 * case, we cannot assign the devices to domains now, as that
+ * would eliminate the old mappings. So skip this part and defer
+ * the assignment to device driver initialization time.
+ */
+ if (copied_tables)
+ goto domains_done;
+
+ /*
* If pass through is not set or not enabled, setup context entries for
* identity mappings for rmrr, gfx, and isa and may fall back to static
* identity mapping if iommu_identity_mapping is set.
@@ -2823,7 +3147,7 @@ static int __init init_dmars(void)
if (iommu_identity_mapping) {
ret = iommu_prepare_static_identity_mapping(hw_pass_through);
if (ret) {
- printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+ pr_crit("Failed to setup IOMMU pass-through\n");
goto free_iommu;
}
}
@@ -2841,20 +3165,21 @@ static int __init init_dmars(void)
* endfor
* endfor
*/
- printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+ pr_info("Setting RMRR:\n");
for_each_rmrr_units(rmrr) {
/* some BIOS lists non-exist devices in DMAR table. */
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
i, dev) {
ret = iommu_prepare_rmrr_dev(rmrr, dev);
if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
+ pr_err("Mapping reserved region failed\n");
}
}
iommu_prepare_isa();
+domains_done:
+
/*
* for each drhd
* enable fault log
@@ -2879,11 +3204,9 @@ static int __init init_dmars(void)
if (ret)
goto free_iommu;
- iommu_set_root_entry(iommu);
+ if (!translation_pre_enabled(iommu))
+ iommu_enable_translation(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
}
@@ -2924,7 +3247,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
}
iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
if (unlikely(!iova)) {
- printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+ pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
return NULL;
}
@@ -2939,7 +3262,7 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain) {
- printk(KERN_ERR "Allocating domain for %s failed",
+ pr_err("Allocating domain for %s failed\n",
dev_name(dev));
return NULL;
}
@@ -2948,7 +3271,7 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
if (unlikely(!domain_context_mapped(dev))) {
ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
if (ret) {
- printk(KERN_ERR "Domain context map for %s failed",
+ pr_err("Domain context map for %s failed\n",
dev_name(dev));
return NULL;
}
@@ -2969,11 +3292,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
return __get_valid_domain_for_dev(dev);
}
-static int iommu_dummy(struct device *dev)
-{
- return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
/* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev)
{
@@ -2995,8 +3313,8 @@ static int iommu_no_mapping(struct device *dev)
* to non-identity mapping.
*/
domain_remove_one_dev_info(si_domain, dev);
- printk(KERN_INFO "32bit %s uses non-identity mapping\n",
- dev_name(dev));
+ pr_info("32bit %s uses non-identity mapping\n",
+ dev_name(dev));
return 0;
}
} else {
@@ -3011,8 +3329,8 @@ static int iommu_no_mapping(struct device *dev)
CONTEXT_TT_PASS_THROUGH :
CONTEXT_TT_MULTI_LEVEL);
if (!ret) {
- printk(KERN_INFO "64bit %s uses identity mapping\n",
- dev_name(dev));
+ pr_info("64bit %s uses identity mapping\n",
+ dev_name(dev));
return 1;
}
}
@@ -3081,7 +3399,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
error:
if (iova)
__free_iova(&domain->iovad, iova);
- printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
+ pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
dev_name(dev), size, (unsigned long long)paddr, dir);
return 0;
}
@@ -3396,7 +3714,7 @@ static inline int iommu_domain_cache_init(void)
NULL);
if (!iommu_domain_cache) {
- printk(KERN_ERR "Couldn't create iommu_domain cache\n");
+ pr_err("Couldn't create iommu_domain cache\n");
ret = -ENOMEM;
}
@@ -3413,7 +3731,7 @@ static inline int iommu_devinfo_cache_init(void)
SLAB_HWCACHE_ALIGN,
NULL);
if (!iommu_devinfo_cache) {
- printk(KERN_ERR "Couldn't create devinfo cache\n");
+ pr_err("Couldn't create devinfo cache\n");
ret = -ENOMEM;
}
@@ -3790,19 +4108,19 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
return 0;
if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
- pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+ pr_warn("%s: Doesn't support hardware pass through.\n",
iommu->name);
return -ENXIO;
}
if (!ecap_sc_support(iommu->ecap) &&
domain_update_iommu_snooping(iommu)) {
- pr_warn("IOMMU: %s doesn't support snooping.\n",
+ pr_warn("%s: Doesn't support snooping.\n",
iommu->name);
return -ENXIO;
}
sp = domain_update_iommu_superpage(iommu) - 1;
if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
- pr_warn("IOMMU: %s doesn't support large page.\n",
+ pr_warn("%s: Doesn't support large page.\n",
iommu->name);
return -ENXIO;
}
@@ -4033,7 +4351,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
start = mhp->start_pfn << PAGE_SHIFT;
end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
if (iommu_domain_identity_map(si_domain, start, end)) {
- pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+ pr_warn("Failed to build identity map for [%llx-%llx]\n",
start, end);
return NOTIFY_BAD;
}
@@ -4051,7 +4369,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
iova = find_iova(&si_domain->iovad, start_vpfn);
if (iova == NULL) {
- pr_debug("dmar: failed get IOVA for PFN %lx\n",
+ pr_debug("Failed get IOVA for PFN %lx\n",
start_vpfn);
break;
}
@@ -4059,7 +4377,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
iova = split_and_remove_iova(&si_domain->iovad, iova,
start_vpfn, last_vpfn);
if (iova == NULL) {
- pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+ pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
start_vpfn, last_vpfn);
return NOTIFY_BAD;
}
@@ -4168,13 +4486,6 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
- /*
- * Disable translation if already enabled prior to OS handover.
- */
- for_each_active_iommu(iommu, drhd)
- if (iommu->gcmd & DMA_GCMD_TE)
- iommu_disable_translation(iommu);
-
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
@@ -4185,10 +4496,10 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
if (list_empty(&dmar_rmrr_units))
- printk(KERN_INFO "DMAR: No RMRR found\n");
+ pr_info("No RMRR found\n");
if (list_empty(&dmar_atsr_units))
- printk(KERN_INFO "DMAR: No ATSR found\n");
+ pr_info("No ATSR found\n");
if (dmar_init_reserved_ranges()) {
if (force_on)
@@ -4202,12 +4513,11 @@ int __init intel_iommu_init(void)
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
- printk(KERN_ERR "IOMMU: dmar init failed\n");
+ pr_err("Initialization failed\n");
goto out_free_reserved_range;
}
up_write(&dmar_global_lock);
- printk(KERN_INFO
- "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
+ pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
@@ -4349,13 +4659,11 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
if (!dmar_domain) {
- printk(KERN_ERR
- "intel_iommu_domain_init: dmar_domain == NULL\n");
+ pr_err("Can't allocate dmar_domain\n");
return NULL;
}
if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- printk(KERN_ERR
- "intel_iommu_domain_init() failed\n");
+ pr_err("Domain initialization failed\n");
domain_exit(dmar_domain);
return NULL;
}
@@ -4414,7 +4722,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
addr_width = cap_mgaw(iommu->cap);
if (dmar_domain->max_addr > (1LL << addr_width)) {
- printk(KERN_ERR "%s: iommu width (%d) is not "
+ pr_err("%s: iommu width (%d) is not "
"sufficient for the mapped address (%llx)\n",
__func__, addr_width, dmar_domain->max_addr);
return -EFAULT;
@@ -4468,7 +4776,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
/* check if minimum agaw is sufficient for mapped address */
end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
if (end < max_addr) {
- printk(KERN_ERR "%s: iommu width (%d) is not "
+ pr_err("%s: iommu width (%d) is not "
"sufficient for the mapped address (%llx)\n",
__func__, dmar_domain->gaw, max_addr);
return -EFAULT;
@@ -4609,7 +4917,7 @@ static const struct iommu_ops intel_iommu_ops = {
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
/* G4x/GM45 integrated gfx dmar support is totally busted. */
- printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+ pr_info("Disabling IOMMU for graphics on this chipset\n");
dmar_map_gfx = 0;
}
@@ -4627,7 +4935,7 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
* Mobile 4 Series Chipset neglects to set RWBF capability,
* but needs it. Same seems to hold for the desktop versions.
*/
- printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+ pr_info("Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
}
@@ -4657,11 +4965,11 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
return;
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
- printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+ pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
dmar_map_gfx = 0;
} else if (dmar_map_gfx) {
/* we have to ensure the gfx device is idle before we flush */
- printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+ pr_info("Disabling batched IOTLB flush on Ironlake\n");
intel_iommu_strict = 1;
}
}
@@ -4723,7 +5031,7 @@ static void __init check_tylersburg_isoch(void)
iommu_identity_mapping |= IDENTMAP_AZALIA;
return;
}
-
- printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+
+ pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
vtisochctrl);
}