Diffstat (limited to 'arch/arc/mm/dma.c')
-rw-r--r--  arch/arc/mm/dma.c  152
1 file changed, 105 insertions, 47 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 29a46bb198cc..01eaf88bf821 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -17,18 +17,14 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
-#include <linux/export.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
-/*
- * Helpers for Coherent DMA API.
- */
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+
+static void *arc_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
- void *paddr;
+ void *paddr, *kvaddr;
/* This is linear addr (0x8000_0000 based) */
paddr = alloc_pages_exact(size, gfp);
@@ -38,22 +34,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
/* This is bus address, platform dependent */
*dma_handle = (dma_addr_t)paddr;
- return paddr;
-}
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- free_pages_exact((void *)dma_handle, size);
-}
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- void *paddr, *kvaddr;
-
/*
* IOC relies on all data (even coherent DMA data) being in cache
* Thus allocate normal cached memory
@@ -65,22 +45,15 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
- if (is_isa_arcv2() && ioc_exists)
- return dma_alloc_noncoherent(dev, size, dma_handle, gfp);
-
- /* This is linear addr (0x8000_0000 based) */
- paddr = alloc_pages_exact(size, gfp);
- if (!paddr)
- return NULL;
+ if ((is_isa_arcv2() && ioc_exists) ||
+ dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ return paddr;
/* This is kernel Virtual address (0x7000_0000 based) */
kvaddr = ioremap_nocache((unsigned long)paddr, size);
if (kvaddr == NULL)
return NULL;
- /* This is bus address, platform dependent */
- *dma_handle = (dma_addr_t)paddr;
-
/*
* Evict any existing L1 and/or L2 lines for the backing page
* in case it was used earlier as a normal "cached" page.
@@ -95,26 +68,111 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return kvaddr;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
- dma_addr_t dma_handle)
+static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- if (is_isa_arcv2() && ioc_exists)
- return dma_free_noncoherent(dev, size, kvaddr, dma_handle);
-
- iounmap((void __force __iomem *)kvaddr);
+ if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
+ !(is_isa_arcv2() && ioc_exists))
+ iounmap((void __force __iomem *)vaddr);
free_pages_exact((void *)dma_handle, size);
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
- * Helper for streaming DMA...
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to be explicitly made
+ * consistent before each use
*/
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir)
+static void _dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ _dma_cache_sync(paddr, size, dir);
+ return (dma_addr_t)paddr;
+}
+
+static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static void arc_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+}
+
+static void arc_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- __inline_dma_cache_sync(paddr, size, dir);
+ _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}
-EXPORT_SYMBOL(__arc_dma_cache_sync);
+
+static void arc_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static void arc_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static int arc_dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+struct dma_map_ops arc_dma_ops = {
+ .alloc = arc_dma_alloc,
+ .free = arc_dma_free,
+ .map_page = arc_dma_map_page,
+ .map_sg = arc_dma_map_sg,
+ .sync_single_for_device = arc_dma_sync_single_for_device,
+ .sync_single_for_cpu = arc_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arc_dma_sync_sg_for_device,
+ .dma_supported = arc_dma_supported,
+};
+EXPORT_SYMBOL(arc_dma_ops);
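
The exported arc_dma_ops table replaces the per-function exports removed above: the generic DMA API now dispatches through the ops table instead of calling dma_alloc_coherent() and friends directly. The companion header change is not part of this diff; a minimal sketch of how an arch of this vintage (pre-4.8, struct dma_attrs era) typically wires up such a table might look roughly like:

/* arch/arc/include/asm/dma-mapping.h -- illustrative sketch only, not
 * taken from this diff.
 */
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H

extern struct dma_map_ops arc_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* One ops table for the whole platform; no per-device override. */
	return &arc_dma_ops;
}

#endif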
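On the allocation side, arc_dma_alloc() now honours DMA_ATTR_NON_CONSISTENT by returning the cached linear address and skipping the ioremap_nocache() remap, the same early-return the IOC path takes. A hedged sketch of a caller requesting such a buffer with the struct dma_attrs API of this era (the helper name is made up for illustration):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void *alloc_noncoherent_buf(struct device *dev, size_t size,
				   dma_addr_t *handle)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);

	/* Lands in arc_dma_alloc(): cached memory, no uncached remap.
	 * The caller is responsible for explicit cache maintenance
	 * around device accesses.
	 */
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}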
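The streaming hooks map DMA directions onto cache maintenance in _dma_cache_sync(): DMA_FROM_DEVICE invalidates, DMA_TO_DEVICE writes back, DMA_BIDIRECTIONAL does both. Note also that arc_dma_sync_single_for_cpu() and arc_dma_sync_single_for_device() ignore the dir argument and hard-code invalidate and writeback respectively. A hypothetical receive path showing the sequence a driver would go through (function and buffer names are made up):

#include <linux/dma-mapping.h>

static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Dispatches to arc_dma_map_page(): DMA_FROM_DEVICE invalidates
	 * the CPU cache so stale lines are not evicted over the DMA data.
	 * arc_dma_map_page() cannot fail, so no error check is needed.
	 */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* ... point the device at 'handle' and wait for completion ... */

	/* Invalidate again so the CPU sees the freshly DMA'd data, then
	 * release; arc_dma_ops has no unmap_page hook, so the unmap does
	 * no cache work here.
	 */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}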