author	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-11 18:50:41 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-11 18:50:41 -0700
commit	e5c372280b9cc1057647b4307757b7885a732a7e (patch)
tree	fa063d378da4ac144e740a5b145de0e9dfa1b66b /drivers/iommu/amd_iommu.c
parent	Merge tag 'pm-4.17-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm (diff)
parent	Merge branches 'x86/amd', 'x86/vt-d', 'arm/rockchip', 'arm/omap', 'arm/mediatek', 'arm/exynos', 'arm/renesas', 'arm/smmu' and 'core' into next (diff)
Merge tag 'iommu-updates-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:

 - OF_IOMMU support for the Rockchip iommu driver so that it can use
   generic DT bindings

 - rework of locking in the AMD IOMMU interrupt remapping code to make
   it work better in RT kernels

 - support for improved iotlb flushing in the AMD IOMMU driver

 - support for 52-bit physical and virtual addressing in the ARM-SMMU

 - various other small fixes and cleanups

* tag 'iommu-updates-v4.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (53 commits)
  iommu/io-pgtable-arm: Avoid warning with 32-bit phys_addr_t
  iommu/rockchip: Support sharing IOMMU between masters
  iommu/rockchip: Add runtime PM support
  iommu/rockchip: Fix error handling in init
  iommu/rockchip: Use OF_IOMMU to attach devices automatically
  iommu/rockchip: Use IOMMU device for dma mapping operations
  dt-bindings: iommu/rockchip: Add clock property
  iommu/rockchip: Control clocks needed to access the IOMMU
  iommu/rockchip: Fix TLB flush of secondary IOMMUs
  iommu/rockchip: Use iopoll helpers to wait for hardware
  iommu/rockchip: Fix error handling in attach
  iommu/rockchip: Request irqs in rk_iommu_probe()
  iommu/rockchip: Fix error handling in probe
  iommu/rockchip: Prohibit unbind and remove
  iommu/amd: Return proper error code in irq_remapping_alloc()
  iommu/amd: Make amd_iommu_devtable_lock a spin_lock
  iommu/amd: Drop the lock while allocating new irq remap table
  iommu/amd: Factor out setting the remap table for a devid
  iommu/amd: Use `table' instead `irt' as variable name in amd_iommu_update_ga()
  iommu/amd: Remove the special case from alloc_irq_table()
  ...
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--  drivers/iommu/amd_iommu.c | 320
1 file changed, 187 insertions(+), 133 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 83819d0cbf90..2a99f0f14795 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -81,11 +81,12 @@
*/
#define AMD_IOMMU_PGSIZES ((~0xFFFUL) & ~(2ULL << 38))
-static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
+static DEFINE_SPINLOCK(pd_bitmap_lock);
+static DEFINE_SPINLOCK(iommu_table_lock);
/* List of all available dev_data structures */
-static LIST_HEAD(dev_data_list);
-static DEFINE_SPINLOCK(dev_data_list_lock);
+static LLIST_HEAD(dev_data_list);
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
@@ -204,40 +205,33 @@ static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return NULL;
dev_data->devid = devid;
-
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_add_tail(&dev_data->dev_data_list, &dev_data_list);
- spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
ratelimit_default_init(&dev_data->rs);
+ llist_add(&dev_data->dev_data_list, &dev_data_list);
return dev_data;
}
static struct iommu_dev_data *search_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
- unsigned long flags;
+ struct llist_node *node;
- spin_lock_irqsave(&dev_data_list_lock, flags);
- list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+ if (llist_empty(&dev_data_list))
+ return NULL;
+
+ node = dev_data_list.first;
+ llist_for_each_entry(dev_data, node, dev_data_list) {
if (dev_data->devid == devid)
- goto out_unlock;
+ return dev_data;
}
- dev_data = NULL;
-
-out_unlock:
- spin_unlock_irqrestore(&dev_data_list_lock, flags);
-
- return dev_data;
+ return NULL;
}
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
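The hunk above replaces the spinlock-protected list of dev_data structures with a lock-free llist: writers publish an entry with a single atomic push, and readers walk the chain without taking any lock, which is safe here because entries are never removed. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's llist helpers (all names below are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int devid;
	struct node *next;
};

static _Atomic(struct node *) list_head = NULL;

/* Push without a lock: one compare-and-swap loop, like llist_add(). */
static void push(struct node *n)
{
	struct node *old = atomic_load(&list_head);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&list_head, &old, n));
}

/* Lock-free search: safe as long as nodes are never freed while readers run. */
static struct node *search(int devid)
{
	for (struct node *n = atomic_load(&list_head); n; n = n->next)
		if (n->devid == devid)
			return n;
	return NULL;
}

int main(void)
{
	struct node *a = malloc(sizeof(*a));
	a->devid = 42;
	push(a);
	printf("found devid 42: %s\n", search(42) ? "yes" : "no");
	return 0;
}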
@@ -311,6 +305,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
if (dev_data == NULL) {
dev_data = alloc_dev_data(devid);
+ if (!dev_data)
+ return NULL;
if (translation_pre_enabled(iommu))
dev_data->defer_attach = true;
@@ -548,6 +544,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
+ struct device *dev = iommu->iommu.dev;
int type, devid, domid, flags;
volatile u32 *event = __evt;
int count = 0;
@@ -574,53 +571,53 @@ retry:
amd_iommu_report_page_fault(devid, domid, address, flags);
return;
} else {
- printk(KERN_ERR "AMD-Vi: Event logged [");
+ dev_err(dev, "AMD-Vi: Event logged [");
}
switch (type) {
case EVENT_TYPE_ILL_DEV:
- printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ dev_err(dev, "ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
dump_dte_entry(devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
- printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ dev_err(dev, "DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
break;
case EVENT_TYPE_PAGE_TAB_ERR:
- printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
- "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- domid, address, flags);
+ dev_err(dev, "PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ domid, address, flags);
break;
case EVENT_TYPE_ILL_CMD:
- printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+ dev_err(dev, "ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
dump_command(address);
break;
case EVENT_TYPE_CMD_HARD_ERR:
- printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
- "flags=0x%04x]\n", address, flags);
+ dev_err(dev, "COMMAND_HARDWARE_ERROR address=0x%016llx "
+ "flags=0x%04x]\n", address, flags);
break;
case EVENT_TYPE_IOTLB_INV_TO:
- printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
- "address=0x%016llx]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address);
+ dev_err(dev, "IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
+ "address=0x%016llx]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address);
break;
case EVENT_TYPE_INV_DEV_REQ:
- printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
- "address=0x%016llx flags=0x%04x]\n",
- PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- address, flags);
+ dev_err(dev, "INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
break;
default:
- printk(KERN_ERR "UNKNOWN type=0x%02x event[0]=0x%08x "
- "event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
- type, event[0], event[1], event[2], event[3]);
+ dev_err(dev, KERN_ERR "UNKNOWN event[0]=0x%08x event[1]=0x%08x "
+ "event[2]=0x%08x event[3]=0x%08x\n",
+ event[0], event[1], event[2], event[3]);
}
memset(__evt, 0, 4 * sizeof(u32));
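One nit worth flagging in the hunk above: dev_err() already logs at KERN_ERR severity, so the leftover KERN_ERR token in the default case gets concatenated into the format string instead of acting as a log level (this was cleaned up by a later patch upstream). A toy model of why doubling the level marker garbles the message — here KERN_ERR is rendered as "<3>" for visibility, whereas the real kernel uses a non-printable byte pair:

#include <stdio.h>

/* Toy printk: the level marker is simply the leading bytes of the format. */
#define KERN_ERR "<3>"
#define printk(fmt, ...) printf(fmt, ##__VA_ARGS__)

/* Toy dev_err: prepends the device name and the KERN_ERR level itself. */
#define dev_err(dev, fmt, ...) printk(KERN_ERR "%s: " fmt, dev, ##__VA_ARGS__)

int main(void)
{
	const char *dev = "0000:00:02.0";

	dev_err(dev, "event[0]=0x%08x\n", 0xdeadbeefu);          /* correct */
	dev_err(dev, KERN_ERR "event[0]=0x%08x\n", 0xdeadbeefu); /* "<3>" leaks into the text */
	return 0;
}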
@@ -1057,9 +1054,9 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu,
unsigned long flags;
int ret;
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command_sync(iommu, cmd, sync);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
@@ -1085,7 +1082,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
iommu->cmd_sem = 0;
@@ -1096,7 +1093,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
ret = wait_on_sem(&iommu->cmd_sem);
out_unlock:
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
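These two hunks convert iommu->lock to a raw spinlock. On PREEMPT_RT a regular spinlock_t becomes a sleeping lock, but the command queue is touched from atomic context and the completion wait busy-polls a semaphore written by the hardware, so this lock must stay a true spinning lock. A loose userspace model of the reset/queue/poll sequence, with a second thread standing in for the IOMMU (illustrative only, not the kernel's actual control flow):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_spinlock_t cmd_lock; /* stands in for iommu->lock (raw on RT) */
static atomic_int cmd_sem;          /* stands in for iommu->cmd_sem */

/* "Hardware" thread: completes the COMPLETION_WAIT command a bit later. */
static void *fake_iommu(void *arg)
{
	(void)arg;
	usleep(1000);
	atomic_store(&cmd_sem, 1);
	return NULL;
}

int main(void)
{
	pthread_t hw;

	pthread_spin_init(&cmd_lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&cmd_lock);
	atomic_store(&cmd_sem, 0);                   /* reset semaphore under the lock */
	pthread_create(&hw, NULL, fake_iommu, NULL); /* "queue" the wait command */
	while (!atomic_load(&cmd_sem))
		;                                    /* wait_on_sem(): spin, never sleep */
	pthread_spin_unlock(&cmd_lock);

	pthread_join(hw, NULL);
	puts("completion observed under the (raw) lock");
	return 0;
}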
@@ -1606,29 +1603,26 @@ static void del_domain_from_list(struct protection_domain *domain)
static u16 domain_id_alloc(void)
{
- unsigned long flags;
int id;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock(&pd_bitmap_lock);
id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
BUG_ON(id == 0);
if (id > 0 && id < MAX_DOMAIN_ID)
__set_bit(id, amd_iommu_pd_alloc_bitmap);
else
id = 0;
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock(&pd_bitmap_lock);
return id;
}
static void domain_id_free(int id)
{
- unsigned long flags;
-
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock(&pd_bitmap_lock);
if (id > 0 && id < MAX_DOMAIN_ID)
__clear_bit(id, amd_iommu_pd_alloc_bitmap);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock(&pd_bitmap_lock);
}
#define DEFINE_FREE_PT_FN(LVL, FN) \
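domain_id_alloc()/domain_id_free() above are a textbook bitmap allocator, now guarded by their own pd_bitmap_lock instead of piggybacking on the device-table lock; the callers never run with interrupts disabled, so a plain spin_lock() suffices. A self-contained sketch of the same allocator, with find_first_zero_bit() replaced by a byte-wise scan and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_DOMAIN_ID 64

static bool pd_bitmap[MAX_DOMAIN_ID]; /* one flag per ID, for clarity */
static pthread_mutex_t pd_bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

static int domain_id_alloc(void)
{
	int id = 0;

	pthread_mutex_lock(&pd_bitmap_lock);
	/* ID 0 stays reserved, mirroring the kernel's BUG_ON(id == 0). */
	for (int i = 1; i < MAX_DOMAIN_ID; i++) {
		if (!pd_bitmap[i]) {
			pd_bitmap[i] = true;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&pd_bitmap_lock);
	return id; /* 0 means the ID space is exhausted */
}

static void domain_id_free(int id)
{
	pthread_mutex_lock(&pd_bitmap_lock);
	if (id > 0 && id < MAX_DOMAIN_ID)
		pd_bitmap[id] = false;
	pthread_mutex_unlock(&pd_bitmap_lock);
}

int main(void)
{
	int id = domain_id_alloc();
	printf("allocated domain id %d\n", id);
	domain_id_free(id);
	return 0;
}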
@@ -2104,9 +2098,9 @@ static int attach_device(struct device *dev,
}
skip_ats_check:
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
ret = __attach_device(dev_data, domain);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
/*
* We might boot into a crash-kernel here. The crashed kernel
@@ -2156,9 +2150,9 @@ static void detach_device(struct device *dev)
domain = dev_data->domain;
/* lock device table */
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
if (!dev_is_pci(dev))
return;
@@ -2795,7 +2789,7 @@ static void cleanup_domain(struct protection_domain *domain)
struct iommu_dev_data *entry;
unsigned long flags;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&amd_iommu_devtable_lock, flags);
while (!list_empty(&domain->dev_list)) {
entry = list_first_entry(&domain->dev_list,
@@ -2803,7 +2797,7 @@ static void cleanup_domain(struct protection_domain *domain)
__detach_device(entry);
}
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
static void protection_domain_free(struct protection_domain *domain)
@@ -3025,15 +3019,12 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
- return -EINVAL;
+ return 0;
mutex_lock(&domain->api_lock);
unmap_size = iommu_unmap_page(domain, iova, page_size);
mutex_unlock(&domain->api_lock);
- domain_flush_tlb_pde(domain);
- domain_flush_complete(domain);
-
return unmap_size;
}
@@ -3151,6 +3142,19 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
return dev_data->defer_attach;
}
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct protection_domain *dom = to_pdomain(domain);
+
+ domain_flush_tlb_pde(dom);
+ domain_flush_complete(dom);
+}
+
+static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
@@ -3169,6 +3173,9 @@ const struct iommu_ops amd_iommu_ops = {
.apply_resv_region = amd_iommu_apply_resv_region,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
+ .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+ .iotlb_range_add = amd_iommu_iotlb_range_add,
+ .iotlb_sync = amd_iommu_flush_iotlb_all,
};
/*****************************************************************************
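The two hunks above implement the improved iotlb flushing mentioned in the pull request: amd_iommu_unmap() no longer flushes after every unmap, and the core iommu layer instead calls flush_iotlb_all/iotlb_sync once per batch, so N unmaps cost one flush instead of N. A sketch of that ops-table shape and calling convention (field names mirror struct iommu_ops; the domain type and handlers are hypothetical stand-ins):

#include <stdio.h>

struct domain { int pending_unmaps; };

struct ops {
	unsigned long (*unmap)(struct domain *d, unsigned long iova, unsigned long size);
	void (*flush_iotlb_all)(struct domain *d);
	void (*iotlb_sync)(struct domain *d);
};

static unsigned long my_unmap(struct domain *d, unsigned long iova, unsigned long size)
{
	(void)iova;
	d->pending_unmaps++; /* tear down page tables, but do NOT flush yet */
	return size;
}

static void my_flush(struct domain *d)
{
	printf("one TLB flush covers %d unmaps\n", d->pending_unmaps);
	d->pending_unmaps = 0;
}

static const struct ops amd_like_ops = {
	.unmap           = my_unmap,
	.flush_iotlb_all = my_flush,
	.iotlb_sync      = my_flush, /* same handler for both, as in the patch */
};

int main(void)
{
	struct domain d = { 0 };
	for (unsigned long iova = 0; iova < 4; iova++)
		amd_like_ops.unmap(&d, iova << 12, 1UL << 12);
	amd_like_ops.iotlb_sync(&d); /* the core layer batches the flush here */
	return 0;
}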
@@ -3570,14 +3577,62 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
amd_iommu_dev_table[devid].data[2] = dte;
}
-static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
+static struct irq_remap_table *get_irq_table(u16 devid)
+{
+ struct irq_remap_table *table;
+
+ if (WARN_ONCE(!amd_iommu_rlookup_table[devid],
+ "%s: no iommu for devid %x\n", __func__, devid))
+ return NULL;
+
+ table = irq_lookup_table[devid];
+ if (WARN_ONCE(!table, "%s: no table for devid %x\n", __func__, devid))
+ return NULL;
+
+ return table;
+}
+
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+ struct irq_remap_table *table;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+ if (!table->table) {
+ kfree(table);
+ return NULL;
+ }
+ raw_spin_lock_init(&table->lock);
+
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ memset(table->table, 0,
+ MAX_IRQS_PER_TABLE * sizeof(u32));
+ else
+ memset(table->table, 0,
+ (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+ return table;
+}
+
+static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
+ struct irq_remap_table *table)
+{
+ irq_lookup_table[devid] = table;
+ set_dte_irq_entry(devid, table);
+ iommu_flush_dte(iommu, devid);
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid)
{
struct irq_remap_table *table = NULL;
+ struct irq_remap_table *new_table = NULL;
struct amd_iommu *iommu;
unsigned long flags;
u16 alias;
- write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ spin_lock_irqsave(&iommu_table_lock, flags);
iommu = amd_iommu_rlookup_table[devid];
if (!iommu)
@@ -3590,60 +3645,45 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
alias = amd_iommu_alias_table[devid];
table = irq_lookup_table[alias];
if (table) {
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
- iommu_flush_dte(iommu, devid);
- goto out;
+ set_remap_table_entry(iommu, devid, table);
+ goto out_wait;
}
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- table = kzalloc(sizeof(*table), GFP_ATOMIC);
- if (!table)
- goto out_unlock;
-
- /* Initialize table spin-lock */
- spin_lock_init(&table->lock);
+ new_table = __alloc_irq_table();
+ if (!new_table)
+ return NULL;
- if (ioapic)
- /* Keep the first 32 indexes free for IOAPIC interrupts */
- table->min_index = 32;
+ spin_lock_irqsave(&iommu_table_lock, flags);
- table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
- if (!table->table) {
- kfree(table);
- table = NULL;
+ table = irq_lookup_table[devid];
+ if (table)
goto out_unlock;
- }
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- memset(table->table, 0,
- MAX_IRQS_PER_TABLE * sizeof(u32));
- else
- memset(table->table, 0,
- (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
-
- if (ioapic) {
- int i;
-
- for (i = 0; i < 32; ++i)
- iommu->irte_ops->set_allocated(table, i);
+ table = irq_lookup_table[alias];
+ if (table) {
+ set_remap_table_entry(iommu, devid, table);
+ goto out_wait;
}
- irq_lookup_table[devid] = table;
- set_dte_irq_entry(devid, table);
- iommu_flush_dte(iommu, devid);
- if (devid != alias) {
- irq_lookup_table[alias] = table;
- set_dte_irq_entry(alias, table);
- iommu_flush_dte(iommu, alias);
- }
+ table = new_table;
+ new_table = NULL;
-out:
+ set_remap_table_entry(iommu, devid, table);
+ if (devid != alias)
+ set_remap_table_entry(iommu, alias, table);
+
+out_wait:
iommu_completion_wait(iommu);
out_unlock:
- write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ spin_unlock_irqrestore(&iommu_table_lock, flags);
+ if (new_table) {
+ kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+ kfree(new_table);
+ }
return table;
}
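alloc_irq_table() above now drops iommu_table_lock before calling the sleeping allocator, re-takes it, and re-checks whether another CPU installed a table in the meantime; the loser of the race frees its spare copy after unlocking. That is what allows GFP_KERNEL instead of GFP_ATOMIC. A compact sketch of the drop/allocate/recheck pattern, collapsed to a single lookup key (the kernel additionally rechecks the alias devid):

#include <pthread.h>
#include <stdlib.h>

struct table { int data; };

static struct table *lookup[16];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct table *alloc_table(int devid)
{
	struct table *t, *new_table;

	pthread_mutex_lock(&table_lock);
	t = lookup[devid];
	pthread_mutex_unlock(&table_lock);
	if (t)
		return t;

	/* Allocate with the lock dropped (GFP_KERNEL territory). */
	new_table = calloc(1, sizeof(*new_table));
	if (!new_table)
		return NULL;

	pthread_mutex_lock(&table_lock);
	t = lookup[devid];
	if (!t) {
		/* We won the race: publish our table. */
		lookup[devid] = new_table;
		t = new_table;
		new_table = NULL;
	}
	pthread_mutex_unlock(&table_lock);

	free(new_table); /* no-op for the winner, frees the loser's spare */
	return t;
}

int main(void)
{
	return alloc_table(3) ? 0 : 1;
}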
@@ -3657,14 +3697,14 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (!iommu)
return -ENODEV;
- table = get_irq_table(devid, false);
+ table = alloc_irq_table(devid);
if (!table)
return -ENODEV;
if (align)
alignment = roundup_pow_of_two(count);
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
/* Scan table for free entries */
for (index = ALIGN(table->min_index, alignment), c = 0;
@@ -3691,7 +3731,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
index = -ENOSPC;
out:
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
return index;
}
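alloc_irq_index() scans the remap table for `count` consecutive free slots, starting each attempt on a multiple of the alignment (roundup_pow_of_two(count) when align is set, so multi-MSI blocks land on the power-of-two boundary the hardware requires). A standalone, simplified sketch of that scan — the kernel version walks the table with a running counter, but the aligned-stride form below is equivalent for this purpose:

#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE 64

static bool allocated[TABLE_SIZE];

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

/* Find `count` consecutive free slots; each candidate run starts aligned. */
static int alloc_index(int count, bool align)
{
	unsigned long alignment = align ? roundup_pow_of_two(count) : 1;

	for (int index = 0; index + count <= TABLE_SIZE; index += alignment) {
		int c;
		for (c = 0; c < count; c++)
			if (allocated[index + c])
				break;
		if (c == count) {
			for (c = 0; c < count; c++)
				allocated[index + c] = true;
			return index;
		}
	}
	return -1; /* -ENOSPC in the kernel */
}

int main(void)
{
	printf("3 aligned slots at index %d\n", alloc_index(3, true));
	return 0;
}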
@@ -3708,11 +3748,11 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (iommu == NULL)
return -EINVAL;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
entry = (struct irte_ga *)table->table;
entry = &entry[index];
@@ -3723,7 +3763,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (data)
data->ref = entry;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3741,13 +3781,13 @@ static int modify_irte(u16 devid, int index, union irte *irte)
if (iommu == NULL)
return -EINVAL;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
table->table[index] = irte->val;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3765,13 +3805,13 @@ static void free_irte(u16 devid, int index)
if (iommu == NULL)
return;
- table = get_irq_table(devid, false);
+ table = get_irq_table(devid);
if (!table)
return;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
iommu->irte_ops->clear_allocated(table, index);
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3852,10 +3892,8 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
u8 vector, u32 dest_apicid)
{
struct irte_ga *irte = (struct irte_ga *) entry;
- struct iommu_dev_data *dev_data = search_dev_data(devid);
- if (!dev_data || !dev_data->use_vapic ||
- !irte->lo.fields_remap.guest_mode) {
+ if (!irte->lo.fields_remap.guest_mode) {
irte->hi.fields.vector = vector;
irte->lo.fields_remap.destination = dest_apicid;
modify_irte_ga(devid, index, irte, NULL);
@@ -4061,7 +4099,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct amd_ir_data *data = NULL;
struct irq_cfg *cfg;
int i, ret, devid;
- int index = -1;
+ int index;
if (!info)
return -EINVAL;
@@ -4085,10 +4123,26 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
return ret;
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
- if (get_irq_table(devid, true))
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+
+ table = alloc_irq_table(devid);
+ if (table) {
+ if (!table->min_index) {
+ /*
+ * Keep the first 32 indexes free for IOAPIC
+ * interrupts.
+ */
+ table->min_index = 32;
+ iommu = amd_iommu_rlookup_table[devid];
+ for (i = 0; i < 32; ++i)
+ iommu->irte_ops->set_allocated(table, i);
+ }
+ WARN_ON(table->min_index != 32);
index = info->ioapic_pin;
- else
- ret = -ENOMEM;
+ } else {
+ index = -ENOMEM;
+ }
} else {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
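With the `ioapic` flag gone from the allocator, the IOAPIC path above reserves the first 32 indexes lazily: the first caller to see a freshly allocated table (min_index == 0) claims them, and every later caller finds min_index already set. A small sketch of that idea, with hypothetical names:

#include <stdbool.h>

#define IOAPIC_RESERVED 32

struct remap_table {
	int min_index; /* 0 means "fresh table, nothing reserved yet" */
	bool allocated[256];
};

/* First IOAPIC user of a table claims indexes 0..31 for IOAPIC pins. */
static void reserve_ioapic_range(struct remap_table *table)
{
	if (!table->min_index) {
		for (int i = 0; i < IOAPIC_RESERVED; i++)
			table->allocated[i] = true;
		table->min_index = IOAPIC_RESERVED;
	}
}

int main(void)
{
	struct remap_table t = { 0 };
	reserve_ioapic_range(&t);
	reserve_ioapic_range(&t); /* idempotent: the second call is a no-op */
	return t.min_index == IOAPIC_RESERVED ? 0 : 1;
}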
@@ -4354,7 +4408,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
unsigned long flags;
struct amd_iommu *iommu;
- struct irq_remap_table *irt;
+ struct irq_remap_table *table;
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
int devid = ir_data->irq_2_irte.devid;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
@@ -4368,11 +4422,11 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
if (!iommu)
return -ENODEV;
- irt = get_irq_table(devid, false);
- if (!irt)
+ table = get_irq_table(devid);
+ if (!table)
return -ENODEV;
- spin_lock_irqsave(&irt->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0)
@@ -4381,7 +4435,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
barrier();
}
- spin_unlock_irqrestore(&irt->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);