Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
 arch/arm/mm/dma-mapping.c | 84 ++++++++----------------------------------------------------------
 1 file changed, 14 insertions(+), 70 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index d42557ee69c2..7d042d5c43e3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
@@ -35,6 +36,7 @@
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
+#include <xen/swiotlb-xen.h>
#include "dma.h"
#include "mm.h"
@@ -192,6 +194,7 @@ const struct dma_map_ops arm_dma_ops = {
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.dma_supported = arm_dma_supported,
+ .get_required_mask = dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -212,6 +215,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
.map_sg = arm_dma_map_sg,
.map_resource = dma_direct_map_resource,
.dma_supported = arm_dma_supported,
+ .get_required_mask = dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -336,25 +340,6 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr);
-static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- /*
- * DMA allocation can be mapped to user space, so lets
- * set VM_USERMAP flags too.
- */
- return dma_common_contiguous_remap(page, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP,
- prot, caller);
-}
-
-static void __dma_free_remap(void *cpu_addr, size_t size)
-{
- dma_common_free_remap(cpu_addr, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP);
-}
-
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;
@@ -510,7 +495,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
if (!want_vaddr)
goto out;
- ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ ptr = dma_common_contiguous_remap(page, size, prot, caller);
if (!ptr) {
__dma_free_buffer(page, size);
return NULL;
@@ -577,7 +562,7 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
goto out;
if (PageHighMem(page)) {
- ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+ ptr = dma_common_contiguous_remap(page, size, prot, caller);
if (!ptr) {
dma_release_from_contiguous(dev, page, count);
return NULL;
@@ -597,7 +582,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
{
if (want_vaddr) {
if (PageHighMem(page))
- __dma_free_remap(cpu_addr, size);
+ dma_common_free_remap(cpu_addr, size);
else
__dma_remap(page, size, PAGE_KERNEL);
}
@@ -689,7 +674,7 @@ static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
static void remap_allocator_free(struct arm_dma_free_args *args)
{
if (args->want_vaddr)
- __dma_free_remap(args->cpu_addr, args->size);
+ dma_common_free_remap(args->cpu_addr, args->size);
__dma_free_buffer(args->page, args->size);
}
@@ -877,17 +862,6 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
-/*
- * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
- * that the intention is to allow exporting memory allocated via the
- * coherent DMA APIs through the dma_buf API, which only accepts a
- * scattertable. This presents a couple of problems:
- * 1. Not all memory allocated via the coherent DMA APIs is backed by
- * a struct page
- * 2. Passing coherent DMA memory into the streaming APIs is not allowed
- * as we will try to flush the memory through a different alias to that
- * actually being used (and the flushes are redundant.)
- */
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
@@ -1132,10 +1106,6 @@ static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
* 32-bit DMA.
* Use the generic dma-direct / swiotlb ops code in that case, as that
* handles bounce buffering for us.
- *
- * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
- * latter is also selected by the Xen code, but that code for now relies
- * on non-NULL dev_dma_ops. To be cleaned up later.
*/
if (IS_ENABLED(CONFIG_ARM_LPAE))
return NULL;
@@ -1373,17 +1343,6 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
}
/*
- * Create a CPU mapping for a specified pages
- */
-static void *
-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- return dma_common_pages_remap(pages, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
-}
-
-/*
* Create a mapping in device IO address space for specified pages
*/
static dma_addr_t
@@ -1455,18 +1414,13 @@ static struct page **__atomic_get_pages(void *addr)
static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
- struct vm_struct *area;
-
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
return __atomic_get_pages(cpu_addr);
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return cpu_addr;
- area = find_vm_area(cpu_addr);
- if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
- return area->pages;
- return NULL;
+ return dma_common_find_pages(cpu_addr);
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
@@ -1539,7 +1493,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return pages;
- addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ addr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!addr)
goto err_mapping;
@@ -1622,10 +1576,8 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
- if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
- dma_common_free_remap(cpu_addr, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP);
- }
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
+ dma_common_free_remap(cpu_addr, size);
__iommu_remove_mapping(dev, handle, size);
__iommu_free_buffer(dev, pages, size, attrs);
@@ -2363,10 +2315,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
set_dma_ops(dev, dma_ops);
#ifdef CONFIG_XEN
- if (xen_initial_domain()) {
- dev->archdata.dev_dma_ops = dev->dma_ops;
- dev->dma_ops = xen_dma_ops;
- }
+ if (xen_initial_domain())
+ dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
dev->archdata.dma_ops_setup = true;
}
@@ -2402,12 +2352,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
return dma_to_pfn(dev, dma_addr);
}
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
- unsigned long attrs)
-{
- return __get_dma_pgprot(attrs, prot);
-}
-
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{