author    Scott Wood <swood@redhat.com>    2018-01-21 03:28:54 -0600
committer Joerg Roedel <jroedel@suse.de>   2018-02-13 14:18:06 +0100
commit    27790398c2aed917828dc3c6f81240d57f1584c9 (patch)
tree      e10672afbc28616d10901241df5bcec458c7bb94 /drivers/iommu
parent    Linux 4.16-rc1 (diff)
iommu/amd: Use raw locks on atomic context paths
Several functions in this driver are called from atomic context, and thus
raw locks must be used in order to be safe on PREEMPT_RT.

This includes paths that must wait for command completion, which is a
potential PREEMPT_RT latency concern but not easily avoidable.

Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
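The conversion below is mechanical: every lock that may be taken in atomic
context moves from spinlock_t to raw_spinlock_t. On PREEMPT_RT a plain
spinlock_t is backed by a sleeping rt_mutex, which must not be acquired from
hard-IRQ or other non-preemptible context, while raw_spinlock_t always
busy-waits. A minimal sketch of the pattern follows; the my_dev structure and
its helpers are hypothetical illustrations, not code from this driver.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct my_dev {
    	raw_spinlock_t lock;		/* was: spinlock_t lock; */
    	u32 status;
    };

    static void my_dev_init(struct my_dev *dev)
    {
    	raw_spin_lock_init(&dev->lock);	/* was: spin_lock_init() */
    }

    /* Safe from atomic context even on PREEMPT_RT: never sleeps. */
    static u32 my_dev_read_status(struct my_dev *dev)
    {
    	unsigned long flags;
    	u32 val;

    	/* was: spin_lock_irqsave()/spin_unlock_irqrestore() */
    	raw_spin_lock_irqsave(&dev->lock, flags);
    	val = dev->status;
    	raw_spin_unlock_irqrestore(&dev->lock, flags);

    	return val;
    }

The trade-off, as the commit message notes, is that paths which spin under a
raw lock while waiting for command completion cannot be preempted, so they
remain a latency concern on PREEMPT_RT.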
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c        | 30
-rw-r--r--  drivers/iommu/amd_iommu_init.c   |  2
-rw-r--r--  drivers/iommu/amd_iommu_types.h  |  4
3 files changed, 18 insertions, 18 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 74788fdeb773..d227f761cc21 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1056,9 +1056,9 @@ static int iommu_queue_command_sync(struct amd_iommu *iommu,
unsigned long flags;
int ret;
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
ret = __iommu_queue_command_sync(iommu, cmd, sync);
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
@@ -1084,7 +1084,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
- spin_lock_irqsave(&iommu->lock, flags);
+ raw_spin_lock_irqsave(&iommu->lock, flags);
iommu->cmd_sem = 0;
@@ -1095,7 +1095,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
ret = wait_on_sem(&iommu->cmd_sem);
out_unlock:
- spin_unlock_irqrestore(&iommu->lock, flags);
+ raw_spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
@@ -3627,7 +3627,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
goto out_unlock;
/* Initialize table spin-lock */
- spin_lock_init(&table->lock);
+ raw_spin_lock_init(&table->lock);
if (ioapic)
/* Keep the first 32 indexes free for IOAPIC interrupts */
@@ -3689,7 +3689,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (align)
alignment = roundup_pow_of_two(count);
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
/* Scan table for free entries */
for (index = ALIGN(table->min_index, alignment), c = 0;
@@ -3716,7 +3716,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
index = -ENOSPC;
out:
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
return index;
}
@@ -3737,7 +3737,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
entry = (struct irte_ga *)table->table;
entry = &entry[index];
@@ -3748,7 +3748,7 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
if (data)
data->ref = entry;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3770,9 +3770,9 @@ static int modify_irte(u16 devid, int index, union irte *irte)
if (!table)
return -ENOMEM;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
table->table[index] = irte->val;
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -3794,9 +3794,9 @@ static void free_irte(u16 devid, int index)
if (!table)
return;
- spin_lock_irqsave(&table->lock, flags);
+ raw_spin_lock_irqsave(&table->lock, flags);
iommu->irte_ops->clear_allocated(table, index);
- spin_unlock_irqrestore(&table->lock, flags);
+ raw_spin_unlock_irqrestore(&table->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
@@ -4397,7 +4397,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
if (!irt)
return -ENODEV;
- spin_lock_irqsave(&irt->lock, flags);
+ raw_spin_lock_irqsave(&irt->lock, flags);
if (ref->lo.fields_vapic.guest_mode) {
if (cpu >= 0)
@@ -4406,7 +4406,7 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
barrier();
}
- spin_unlock_irqrestore(&irt->lock, flags);
+ raw_spin_unlock_irqrestore(&irt->lock, flags);
iommu_flush_irt(iommu, devid);
iommu_completion_wait(iommu);
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 4e4a615bf13f..904c575d1677 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1474,7 +1474,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
int ret;
- spin_lock_init(&iommu->lock);
+ raw_spin_lock_init(&iommu->lock);
/* Add IOMMU to internal data structures */
list_add_tail(&iommu->list, &amd_iommu_list);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 6a877ebd058b..da886b0095aa 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -408,7 +408,7 @@ extern bool amd_iommu_iotlb_sup;
#define IRQ_TABLE_ALIGNMENT 128
struct irq_remap_table {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned min_index;
u32 *table;
};
@@ -490,7 +490,7 @@ struct amd_iommu {
int index;
/* locks the accesses to the hardware */
- spinlock_t lock;
+ raw_spinlock_t lock;
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;