 drivers/iommu/amd_iommu_init.c | 94 ++++++++++++++++++++++++------------------
 1 file changed, 56 insertions(+), 38 deletions(-)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 568c52317757..6be3853a5d97 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -71,6 +71,8 @@
#define IVHD_FLAG_ISOC_EN_MASK 0x08
#define IVMD_FLAG_EXCL_RANGE 0x08
+#define IVMD_FLAG_IW 0x04
+#define IVMD_FLAG_IR 0x02
#define IVMD_FLAG_UNITY_MAP 0x01
#define ACPI_DEVFLAG_INITPASS 0x01
@@ -147,7 +149,7 @@ bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
-static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
@@ -440,7 +442,7 @@ static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
return NULL;
}
- return (u8 __iomem *)ioremap_nocache(address, end);
+ return (u8 __iomem *)ioremap(address, end);
}
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
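Note: ioremap_nocache() was only ever an alias for ioremap() on x86 and was removed tree-wide in 5.6, so this substitution is behavior-preserving. A minimal sketch of the map/use/unmap pairing this helper participates in; the address, size, and register offset below are hypothetical, not taken from the driver:

#include <linux/errno.h>
#include <linux/io.h>

/* Sketch: map device MMIO, poke one register, unmap. On x86,
 * ioremap() has always returned an uncached mapping, which is why
 * dropping the _nocache suffix changes nothing.
 */
static int __init demo_mmio_probe(void)
{
	void __iomem *base = ioremap(0xfed80000UL, 0x4000);

	if (!base)
		return -ENOMEM;

	writel(0x1, base + 0x18);	/* hypothetical control register */
	iounmap(base);
	return 0;
}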
@@ -714,7 +716,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
- iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
iommu_feature_enable(iommu, CONTROL_PPR_EN);
}
@@ -1116,21 +1118,17 @@ static int __init add_early_maps(void)
*/
static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
{
- struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
-
if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
return;
- if (iommu) {
- /*
- * We only can configure exclusion ranges per IOMMU, not
- * per device. But we can enable the exclusion range per
- * device. This is done here
- */
- set_dev_entry_bit(devid, DEV_ENTRY_EX);
- iommu->exclusion_start = m->range_start;
- iommu->exclusion_length = m->range_length;
- }
+ /*
+ * Treat per-device exclusion ranges as r/w unity-mapped regions
+ * because some buggy BIOSes define multiple exclusion ranges
+ * (IVMD entries) in the ACPI table, and each one overwrites the
+ * per-IOMMU exclusion_start and exclusion_length members set by
+ * the previous one.
+ */
+ m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP);
}
/*
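For context on why rewriting the flags is enough: the entry now falls through to the ordinary unity-map path, which translates the IVMD read/write bits into the driver's protection bits. A condensed sketch of that translation; the constant names follow the kernel's, but this is not the verbatim code from init_unity_map_range():

/* Sketch: how the rewritten IVMD flags become a r/w unity mapping. */
static int ivmd_flags_to_prot(u8 ivmd_flags)
{
	int prot = 0;

	if (ivmd_flags & IVMD_FLAG_IR)	/* reads permitted */
		prot |= IOMMU_PROT_IR;
	if (ivmd_flags & IVMD_FLAG_IW)	/* writes permitted */
		prot |= IOMMU_PROT_IW;

	return prot;	/* former exclusion range now maps as IR|IW */
}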
@@ -1523,8 +1521,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
- if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0))
- amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
break;
case 0x11:
case 0x40:
@@ -1534,8 +1530,15 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
- if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0))
- amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+ /*
+ * Note: Since iommu_update_intcapxt() programs the MSI address
+ * lo/hi/data registers through the IOMMU's MMIO view of the MSI
+ * capability block, we must check both EFR[XtSup] and
+ * EFR[MsiCapMmioSup] before advertising x2APIC support.
+ */
+ if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
+ (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
+ amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
break;
default:
return -EINVAL;
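Together with the default change at the top of the file (amd_iommu_xt_mode now starts at IRQ_REMAP_XAPIC_MODE), these hunks invert the logic: instead of starting in x2APIC mode and downgrading when XtSup is clear, the driver only promotes to x2APIC mode when the hardware advertises both capabilities. Distilled into a standalone predicate; a sketch in which the shift constants are the driver's and everything else is illustrative:

/* Sketch: x2APIC-mode interrupt remapping needs both EFR bits,
 * because iommu_update_intcapxt() programs the MSI address/data
 * registers through the MMIO view of the MSI capability block.
 */
static bool efr_supports_x2apic(u64 efr_reg)
{
	return (efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
	       (efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT));
}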
@@ -1655,27 +1658,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
struct pci_dev *pdev = iommu->dev;
- u64 val = 0xabcd, val2 = 0;
+ u64 val = 0xabcd, val2 = 0, save_reg = 0;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
+ /* save the value to restore, if writable */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+ goto pc_false;
+
/* Check if the performance counters can be written to */
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
- (val != val2)) {
- pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
- amd_iommu_pc_present = false;
- return;
- }
+ (val != val2))
+ goto pc_false;
+
+ /* restore */
+ if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+ goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
iommu->max_banks = (u8) ((val >> 12) & 0x3f);
iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+ return;
+
+pc_false:
+ pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+ amd_iommu_pc_present = false;
+ return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,
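The hunk above replaces the old write-only probe with a save/test/restore sequence, so a successful probe no longer clobbers counter state. The pattern, generalized as a sketch; reg_read()/reg_write() are hypothetical stand-ins for iommu_pc_get_set_reg(), and the test value matches the one used above:

/* Sketch: non-destructive writability probe. Save the current
 * value, write a known pattern, verify the read-back, then restore
 * the saved value so the register is left as we found it.
 */
static bool probe_reg_writable(u64 (*reg_read)(void),
			       void (*reg_write)(u64))
{
	u64 saved = reg_read();		/* save for later restore */
	u64 pattern = 0xabcd;

	reg_write(pattern);
	if (reg_read() != pattern)	/* read-back mismatch: not writable */
		return false;

	reg_write(saved);		/* restore the original value */
	return true;
}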
@@ -1715,7 +1730,6 @@ static const struct attribute_group *amd_iommu_groups[] = {
static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
- u32 range, misc, low, high;
int ret;
iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
@@ -1728,19 +1742,12 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
- &range);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
- &misc);
if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
amd_iommu_iotlb_sup = false;
/* read extended feature bits */
- low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
- high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
- iommu->features = ((u64)high << 32) | low;
+ iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
if (iommu_feature(iommu, FEATURE_GT)) {
int glxval;
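The readq() change is purely a simplification: one 64-bit MMIO read replaces two 32-bit reads plus a shift/or. Side by side, as a sketch; note that readq() is only defined on 32-bit targets via the io-64-nonatomic-*.h helpers, but this driver is x86-64 only, so the plain form is safe:

/* Sketch: both forms yield the same value on x86-64. */
static u64 read_ext_features(void __iomem *mmio_base)
{
	return readq(mmio_base + MMIO_EXT_FEATURES);
	/* equivalent to the removed code:
	 *   u32 low  = readl(mmio_base + MMIO_EXT_FEATURES);
	 *   u32 high = readl(mmio_base + MMIO_EXT_FEATURES + 4);
	 *   return ((u64)high << 32) | low;
	 */
}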
@@ -1984,8 +1991,8 @@ static int iommu_init_intcapxt(struct amd_iommu *iommu)
struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
/**
- * IntCapXT requires XTSup=1, which can be inferred
- * amd_iommu_xt_mode.
+ * IntCapXT requires XTSup=1 and MsiCapMmioSup=1,
+ * which can be inferred from amd_iommu_xt_mode.
*/
if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
return 0;
@@ -2032,7 +2039,7 @@ enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
if (iommu->ppr_log != NULL)
- iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
iommu_ga_log_enable(iommu);
@@ -2516,6 +2523,7 @@ static int __init early_amd_iommu_init(void)
struct acpi_table_header *ivrs_base;
acpi_status status;
int i, remap_cache_sz, ret = 0;
+ u32 pci_id;
if (!amd_iommu_detected)
return -ENODEV;
@@ -2603,6 +2611,16 @@ static int __init early_amd_iommu_init(void)
if (ret)
goto out;
+ /* Disable IOMMU if there's Stoney Ridge graphics */
+ for (i = 0; i < 32; i++) {
+ pci_id = read_pci_config(0, i, 0, 0);
+ if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
+ pr_info("Disable IOMMU on Stoney Ridge\n");
+ amd_iommu_disabled = true;
+ break;
+ }
+ }
+
/* Disable any previously enabled IOMMUs */
if (!is_kdump_kernel() || amd_iommu_disabled)
disable_iommus();
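What the quirk loop above matches: read_pci_config(0, i, 0, 0) returns config-space dword 0 for bus 0, slot i, function 0, which packs the vendor ID in bits 15:0 and the device ID in bits 31:16. Vendor 0x1002 is ATI/AMD; 0x98e4 is the Stoney Ridge internal GPU per this patch. Decoded as a sketch:

/* Sketch: decode of the ID dword tested in the quirk loop. */
static bool is_stoney_ridge_gfx(u32 pci_id)
{
	u16 vendor = pci_id & 0xffff;	/* bits 15:0  */
	u16 device = pci_id >> 16;	/* bits 31:16 */

	return vendor == 0x1002 && device == 0x98e4;
}

This quirk is also why the "disabled on kernel command-line" message below is generalized: amd_iommu_disabled can now be set by this detection path as well, not only by the command line.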
@@ -2711,7 +2729,7 @@ static int __init state_next(void)
ret = early_amd_iommu_init();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
- pr_info("AMD IOMMU disabled on kernel command-line\n");
+ pr_info("AMD IOMMU disabled\n");
init_state = IOMMU_CMDLINE_DISABLED;
ret = -EINVAL;
}