Diffstat (limited to 'drivers/iommu/dma-iommu.c')
 drivers/iommu/dma-iommu.c | 72
 1 file changed, 53 insertions(+), 19 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4959f5df21bd..0cbcd3fc3e7e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -10,9 +10,8 @@
#include <linux/acpi_iort.h>
#include <linux/device.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
-#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
@@ -343,8 +342,11 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
- cookie->fq_domain = domain;
- init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+ if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
+ NULL))
+ pr_warn("iova flush queue initialization failed\n");
+ else
+ cookie->fq_domain = domain;
}
if (!dev)
@@ -471,7 +473,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
WARN_ON(unmapped != size);
if (!cookie->fq_domain)
- iommu_tlb_sync(domain, &iotlb_gather);
+ iommu_iotlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size);
}
@@ -524,6 +526,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
/* IOMMU can map any pages, so highmem can also be used here */
gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+ /* It makes no sense to muck about with huge pages */
+ gfp &= ~__GFP_COMP;
+
while (count) {
struct page *page = NULL;
unsigned int order_size;
@@ -544,15 +549,9 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
page = alloc_pages_node(nid, alloc_flags, order);
if (!page)
continue;
- if (!order)
- break;
- if (!PageCompound(page)) {
+ if (order)
split_page(page, order);
- break;
- } else if (!split_huge_page(page)) {
- break;
- }
- __free_pages(page, order);
+ break;
}
if (!page) {
__iommu_dma_free_pages(pages, i);
@@ -572,6 +571,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
* @size: Size of buffer in bytes
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
+ * @prot: pgprot_t to use for the remapped mapping
* @attrs: DMA attributes for this allocation
*
* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
@@ -580,14 +580,14 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
* Return: Mapped virtual address, or NULL on failure.
*/
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+ unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
- pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
@@ -1030,13 +1030,15 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
gfp |= __GFP_ZERO;
if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
- !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
- return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+ !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
+ return iommu_dma_alloc_remap(dev, size, handle, gfp,
+ dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
+ }
if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
- cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
- gfp);
+ page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
+ gfp, NULL);
else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
if (!cpu_addr)
@@ -1052,6 +1054,34 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
+#ifdef CONFIG_DMA_REMAP
+static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
+{
+ if (!gfpflags_allow_blocking(gfp)) {
+ struct page *page;
+
+ page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
+ if (!page)
+ return NULL;
+ return page_address(page);
+ }
+
+ return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
+ PAGE_KERNEL, 0);
+}
+
+static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
+{
+ __iommu_dma_unmap(dev, handle, size);
+ __iommu_dma_free(dev, size, cpu_addr);
+}
+#else
+#define iommu_dma_alloc_noncoherent NULL
+#define iommu_dma_free_noncoherent NULL
+#endif /* CONFIG_DMA_REMAP */
+
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
@@ -1120,6 +1150,10 @@ static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
static const struct dma_map_ops iommu_dma_ops = {
.alloc = iommu_dma_alloc,
.free = iommu_dma_free,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+ .alloc_noncoherent = iommu_dma_alloc_noncoherent,
+ .free_noncoherent = iommu_dma_free_noncoherent,
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
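 .unmap_page = iommu_dma_unmap_page,

The .alloc_noncoherent/.free_noncoherent hooks registered above are reached
through the generic dma_alloc_noncoherent()/dma_free_noncoherent() wrappers in
the DMA mapping API. A minimal driver-side sketch of how a consumer would
exercise this path follows; the device name, ring size, and helper names are
illustrative only and not part of this patch, while the DMA API calls and the
explicit ownership transfers via dma_sync_single_*() are as documented for
non-coherent allocations.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical example: allocate a non-coherent descriptor ring. */
static void *mydev_setup_ring(struct device *dev, dma_addr_t *dma)
{
	void *ring;

	/* Dispatches to iommu_dma_alloc_noncoherent() on IOMMU-backed devices */
	ring = dma_alloc_noncoherent(dev, SZ_4K, dma, DMA_BIDIRECTIONAL,
				     GFP_KERNEL);
	if (!ring)
		return NULL;

	/* CPU fills in descriptors, then hands ownership to the device */
	dma_sync_single_for_device(dev, *dma, SZ_4K, DMA_BIDIRECTIONAL);
	return ring;
}

static void mydev_teardown_ring(struct device *dev, void *ring, dma_addr_t dma)
{
	dma_free_noncoherent(dev, SZ_4K, ring, dma, DMA_BIDIRECTIONAL);
}

Unlike dma_alloc_coherent(), buffers from this interface are not kept in sync
automatically, so the caller owns the cache maintenance: sync for the device
before it reads, and for the CPU before the CPU reads results back.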