path: root/drivers/iommu/amd_iommu.c
author		Tom Murphy <murphyt7@tcd.ie>	2019-09-08 09:56:37 -0700
committer	Joerg Roedel <jroedel@suse.de>	2019-10-15 11:31:03 +0200
commit		37ec8eb851c1876580a963f283fe7496592b9f72 (patch)
tree		6435fe81d85d41a8e193bb6c26a58cb35fc45a33 /drivers/iommu/amd_iommu.c
parent		Linux 5.4-rc3 (diff)
download	linux-dev-37ec8eb851c1876580a963f283fe7496592b9f72.tar.xz
		linux-dev-37ec8eb851c1876580a963f283fe7496592b9f72.zip
iommu/amd: Remove unnecessary locking from AMD iommu driver
With or without locking, it doesn't make sense for two writers to write to the same IOVA range at the same time. Even with locking we still have a race on which writer takes the lock first, so we still can't be sure what the result will be. With locking the result is more sane: it will reflect whichever writer ran last, but that is still useless because we can't know which writer will take the lock last. Having two writers write to the same IOVA range at the same time is a fundamentally broken design.

So we can remove the locking and work on the assumption that no two writers will write to the same IOVA range at the same time. The only exception is when we have to allocate a middle page in the page tables, because a middle page can cover more than just the IOVA range a single writer has been allocated. This isn't an issue in the AMD driver, however, because it can allocate middle pages atomically using cmpxchg64().

Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
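The cmpxchg64() pattern mentioned above can be illustrated with a short, self-contained sketch. This is not the driver's actual page-table code: install_middle_page() and PTE_ILLUSTRATIVE_FLAGS are made-up names, and the real driver's level handling and address-space growth are omitted. The point is only that a missing middle page can be populated lock-free, because the losing writer in a race simply frees its candidate page and continues with the winner's.

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>

/* Stand-in for the real present/level bits of a middle-level PTE. */
#define PTE_ILLUSTRATIVE_FLAGS	0x3ULL

static u64 *install_middle_page(u64 *slot, gfp_t gfp)
{
	unsigned long page;
	u64 newpte;

	/* Candidate page for the (assumed empty) middle-level slot. */
	page = get_zeroed_page(gfp);
	if (!page)
		return NULL;

	newpte = (u64)virt_to_phys((void *)page) | PTE_ILLUSTRATIVE_FLAGS;

	/*
	 * Publish the page only if the slot is still empty.  If a
	 * concurrent writer installed its own page first, free ours and
	 * carry on with the winner's page; no mutex is needed.
	 */
	if (cmpxchg64(slot, 0ULL, newpte) != 0ULL)
		free_page(page);

	return slot;
}

Because the only structure two well-behaved writers may legitimately touch at the same time is handled atomically like this, the api_lock around iommu_map_page() and iommu_unmap_page() can be dropped, which is what the diff below does.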
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--	drivers/iommu/amd_iommu.c	10
1 file changed, 1 insertion(+), 9 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2369b8af81f3..97ca4ed4ce0c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2934,7 +2934,6 @@ static void protection_domain_free(struct protection_domain *domain)
 static int protection_domain_init(struct protection_domain *domain)
 {
 	spin_lock_init(&domain->lock);
-	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		return -ENOMEM;
@@ -3121,9 +3120,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= IOMMU_PROT_IW;
 
-	mutex_lock(&domain->api_lock);
 	ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
-	mutex_unlock(&domain->api_lock);
 
 	domain_flush_np_cache(domain, iova, page_size);
 
@@ -3135,16 +3132,11 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 			      struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return 0;
 
-	mutex_lock(&domain->api_lock);
-	unmap_size = iommu_unmap_page(domain, iova, page_size);
-	mutex_unlock(&domain->api_lock);
-
-	return unmap_size;
+	return iommu_unmap_page(domain, iova, page_size);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,