author     Christoph Hellwig <hch@lst.de>  2018-12-06 07:06:04 -0800
committer  Christoph Hellwig <hch@lst.de>  2018-12-13 21:06:14 +0100
commit     68c608345cc569bcfa1c1b2add4c00c343ecf933 (patch)
tree       87bded168f1a45c71ac669f5a316663ac5ce3ed8 /arch/ia64/kernel/dma-mapping.c
parent     swiotlb: remove SWIOTLB_MAP_ERROR (diff)
download   linux-dev-68c608345cc569bcfa1c1b2add4c00c343ecf933.tar.xz
           linux-dev-68c608345cc569bcfa1c1b2add4c00c343ecf933.zip
swiotlb: remove dma_mark_clean
Instead of providing a special dma_mark_clean hook just for ia64, switch ia64 over to the normal arch_sync_dma_for_cpu hooks. This means that we now also set the PG_arch_1 bit for pages in the swiotlb buffer, which isn't strictly needed as we will never execute code out of the swiotlb buffer, but it is otherwise harmless.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
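The arch_sync_dma_for_cpu hook that the message switches ia64 to lives outside this file, so it does not appear in the diff below. As a rough sketch of what such a hook can look like on ia64 (setting PG_arch_1 on every page the device may have written, so the lazy I-cache flush path treats the page as dirty), consider the following; the prototype matches the generic kernel hook of this era, but the body and header choices are illustrative assumptions, not the literal hunk from this commit:

/*
 * Illustrative sketch only, not the exact code added by this commit:
 * mark each page covered by a DMA-for-CPU sync with PG_arch_1 so
 * ia64's lazy I-cache flush path knows the page may hold stale
 * instruction-cache contents.  Assumes size > 0.
 */
#include <linux/dma-noncoherent.h>
#include <linux/mm.h>
#include <linux/pfn.h>

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(paddr);
        unsigned long last_pfn = PFN_DOWN(paddr + size - 1);

        for (; pfn <= last_pfn; pfn++)
                set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
}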
Diffstat (limited to '')
-rw-r--r--  arch/ia64/kernel/dma-mapping.c  20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 7a471d8d67d4..36dd6aa6d759 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/export.h>
@@ -16,6 +16,24 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
EXPORT_SYMBOL(dma_get_ops);
#ifdef CONFIG_SWIOTLB
+void *arch_dma_alloc(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+                dma_addr_t dma_addr, unsigned long attrs)
+{
+        dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+                dma_addr_t dma_addr)
+{
+        return page_to_pfn(virt_to_page(cpu_addr));
+}
+
+
void __init swiotlb_dma_init(void)
{
dma_ops = &swiotlb_dma_ops;