author     Robin Murphy <robin.murphy@arm.com>      2018-09-12 16:24:14 +0100
committer  Joerg Roedel <jroedel@suse.de>           2018-09-25 10:23:16 +0200
commit     7adb562c3e90f87b0da196c372afe251ad4ec62e
tree       5b2e10c8ec384fc92ef5f80dab41fbf86675690d  /arch/arm64/mm/dma-mapping.c
parent     iommu/dma: Use fast DMA domain lookup
arm64/dma-mapping: Mildly optimise non-coherent IOMMU ops
Whilst the symmetry of deferring to the existing sync callback in
__iommu_map_page() is nice, taking a round-trip through
iommu_iova_to_phys() is a pretty heavyweight way to get an address we
can trivially compute from the page we already have. Tweaking it to
just perform the cache maintenance directly when appropriate doesn't
really make the code any more complicated, and the runtime efficiency
gain can only be a benefit.

Furthermore, the sync operations themselves know they can only be
invoked on a managed DMA ops domain, so can use the fast specific
domain lookup to avoid excessive manipulation of the group refcount
(particularly in the scatterlist cases).

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
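To illustrate the first point, the map path before and after the change
looks roughly like this (a simplified sketch with error handling and the
DMA_ATTR_SKIP_CPU_SYNC check elided; the diff below has the real code).
The sync/unmap paths must keep the iommu_iova_to_phys() translation,
since only the IOVA is in hand there, but they now reach the domain via
the cheaper lookup:

	/* Before: translate the just-created IOVA back to a physical
	 * address via the IOMMU, only to derive a CPU address from it. */
	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);

	/* After: the mapping caller already holds the page, so the CPU
	 * address is a trivial computation, with no IOVA walk required. */
	__dma_map_area(page_address(page) + offset, size, dir);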
Diffstat (limited to 'arch/arm64/mm/dma-mapping.c')
-rw-r--r--  arch/arm64/mm/dma-mapping.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 072c51fb07d7..cf017c5bb5e7 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -712,7 +712,7 @@ static void __iommu_sync_single_for_cpu(struct device *dev,
 	if (is_device_dma_coherent(dev))
 		return;
 
-	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
+	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
 	__dma_unmap_area(phys_to_virt(phys), size, dir);
 }
 
@@ -725,7 +725,7 @@ static void __iommu_sync_single_for_device(struct device *dev,
 	if (is_device_dma_coherent(dev))
 		return;
 
-	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
+	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
 	__dma_map_area(phys_to_virt(phys), size, dir);
 }
 
@@ -738,9 +738,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
 	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
-	if (!iommu_dma_mapping_error(dev, dev_addr) &&
-	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-		__iommu_sync_single_for_device(dev, dev_addr, size, dir);
+	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    !iommu_dma_mapping_error(dev, dev_addr))
+		__dma_map_area(page_address(page) + offset, size, dir);
 
 	return dev_addr;
 }
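
As for the "fast specific domain lookup" mentioned in the commit
message, the difference between the two helpers is roughly the
following (a paraphrased sketch of the generic IOMMU-layer code of
this era, not a verbatim copy):

	/* Generic lookup: must take and drop a reference on the device's
	 * IOMMU group, i.e. atomic refcount traffic on every call. */
	struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
	{
		struct iommu_domain *domain;
		struct iommu_group *group = iommu_group_get(dev);

		if (!group)
			return NULL;
		domain = group->domain;
		iommu_group_put(group);
		return domain;
	}

	/* Fast lookup: callers such as the DMA ops above already know the
	 * device is attached to its managed default domain, so the group
	 * refcounting can be skipped entirely. */
	struct iommu_domain *iommu_get_dma_domain(struct device *dev)
	{
		return dev->iommu_group->default_domain;
	}

This is why the patch description notes the saving is most visible in
the scatterlist cases, where the sync callbacks previously performed
that get/put dance once per segment.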