From 25622e045a6aed45c9b12d1977fcb3fc3b349a73 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Apr 2018 19:27:22 +0200 Subject: nios2: use generic dma_noncoherent_ops Switch to the generic noncoherent direct mapping implementation. Signed-off-by: Christoph Hellwig Acked-by: Ley Foon Tan Tested-by: Ley Foon Tan --- arch/nios2/Kconfig | 3 + arch/nios2/include/asm/Kbuild | 1 + arch/nios2/include/asm/dma-mapping.h | 20 ----- arch/nios2/mm/dma-mapping.c | 139 ++++------------------------------- 4 files changed, 17 insertions(+), 146 deletions(-) delete mode 100644 arch/nios2/include/asm/dma-mapping.h (limited to 'arch') diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 3d4ec88f1db1..92035042cf62 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -1,6 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 config NIOS2 def_bool y + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select DMA_NONCOHERENT_OPS select TIMER_OF select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 64ed3d656956..8fde4fa2c34f 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -9,6 +9,7 @@ generic-y += current.h generic-y += device.h generic-y += div64.h generic-y += dma.h +generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += exec.h generic-y += extable.h diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h deleted file mode 100644 index 6ceb92251da0..000000000000 --- a/arch/nios2/include/asm/dma-mapping.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (C) 2011 Tobias Klauser - * Copyright (C) 2009 Wind River Systems Inc - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file COPYING in the main directory of this - * archive for more details. 
- */ - -#ifndef _ASM_NIOS2_DMA_MAPPING_H -#define _ASM_NIOS2_DMA_MAPPING_H - -extern const struct dma_map_ops nios2_dma_ops; - -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - return &nios2_dma_ops; -} - -#endif /* _ASM_NIOS2_DMA_MAPPING_H */ diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index 4be815519dd4..4af9e5b5ba1c 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c @@ -12,18 +12,18 @@ #include #include -#include #include -#include #include #include #include #include -static inline void __dma_sync_for_device(void *vaddr, size_t size, - enum dma_data_direction direction) +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - switch (direction) { + void *vaddr = phys_to_virt(paddr); + + switch (dir) { case DMA_FROM_DEVICE: invalidate_dcache_range((unsigned long)vaddr, (unsigned long)(vaddr + size)); @@ -42,10 +42,12 @@ static inline void __dma_sync_for_device(void *vaddr, size_t size, } } -static inline void __dma_sync_for_cpu(void *vaddr, size_t size, - enum dma_data_direction direction) +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - switch (direction) { + void *vaddr = phys_to_virt(paddr); + + switch (dir) { case DMA_BIDIRECTIONAL: case DMA_FROM_DEVICE: invalidate_dcache_range((unsigned long)vaddr, @@ -58,8 +60,8 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size, } } -static void *nios2_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) { void *ret; @@ -80,125 +82,10 @@ static void *nios2_dma_alloc(struct device *dev, size_t size, return ret; } -static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, +void arch_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); free_pages(addr, get_order(size)); } - -static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - int i; - - for_each_sg(sg, sg, nents, i) { - void *addr = sg_virt(sg); - - if (!addr) - continue; - - sg->dma_address = sg_phys(sg); - - if (attrs & DMA_ATTR_SKIP_CPU_SYNC) - continue; - - __dma_sync_for_device(addr, sg->length, direction); - } - - return nents; -} - -static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction, - unsigned long attrs) -{ - void *addr = page_address(page) + offset; - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - __dma_sync_for_device(addr, size, direction); - - return page_to_phys(page) + offset; -} - -static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction direction, - unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); -} - -static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, - int nhwentries, enum dma_data_direction direction, - unsigned long attrs) -{ - void *addr; - int i; - - if (direction == DMA_TO_DEVICE) - return; - - if (attrs & DMA_ATTR_SKIP_CPU_SYNC) - return; - - for_each_sg(sg, sg, nhwentries, i) { - addr = sg_virt(sg); - if (addr) - 
__dma_sync_for_cpu(addr, sg->length, direction); - } -} - -static void nios2_dma_sync_single_for_cpu(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); -} - -static void nios2_dma_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - __dma_sync_for_device(phys_to_virt(dma_handle), size, direction); -} - -static void nios2_dma_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - int i; - - /* Make sure that gcc doesn't leave the empty loop body. */ - for_each_sg(sg, sg, nelems, i) - __dma_sync_for_cpu(sg_virt(sg), sg->length, direction); -} - -static void nios2_dma_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - int i; - - /* Make sure that gcc doesn't leave the empty loop body. */ - for_each_sg(sg, sg, nelems, i) - __dma_sync_for_device(sg_virt(sg), sg->length, direction); - -} - -const struct dma_map_ops nios2_dma_ops = { - .alloc = nios2_dma_alloc, - .free = nios2_dma_free, - .map_page = nios2_dma_map_page, - .unmap_page = nios2_dma_unmap_page, - .map_sg = nios2_dma_map_sg, - .unmap_sg = nios2_dma_unmap_sg, - .sync_single_for_device = nios2_dma_sync_single_for_device, - .sync_single_for_cpu = nios2_dma_sync_single_for_cpu, - .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu, - .sync_sg_for_device = nios2_dma_sync_sg_for_device, -}; -EXPORT_SYMBOL(nios2_dma_ops); -- cgit v1.2.3-59-g8ed1b From f07d141fe9430cdf9f8a65a87c4136bd83b8ab2e Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 23 Jul 2018 23:16:07 +0100 Subject: dma-mapping: Generalise dma_32bit_limit flag Whilst the notion of an upstream DMA restriction is most commonly seen in PCI host bridges saddled with a 32-bit native interface, a more general version of the same issue can exist on complex SoCs where a bus or point-to-point interconnect link from a device's DMA master interface to another component along the path to memory (often an IOMMU) may carry fewer address bits than the interfaces at both ends nominally support. In order to properly deal with this, the first step is to expand the dma_32bit_limit flag into an arbitrary mask. To minimise the impact on existing code, we'll make sure to only consider this new mask valid if set. That makes sense anyway, since a mask of zero would represent DMA not being wired up at all, and that would be better handled by not providing valid ops in the first place. 
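For illustration, a minimal sketch of the intended semantics (effective_dma_mask() is an invented helper, not something this patch adds): a bus_dma_mask of zero means no upstream constraint was declared, while any non-zero value further clips whatever the device's own dma_mask allows.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

static inline u64 effective_dma_mask(struct device *dev)
{
	u64 mask = dev->dma_mask ? *dev->dma_mask : DMA_BIT_MASK(32);

	/* Only honour bus_dma_mask when it was actually set; zero = no limit. */
	if (dev->bus_dma_mask)
		mask = min(mask, dev->bus_dma_mask);

	return mask;
}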
Signed-off-by: Robin Murphy Acked-by: Ard Biesheuvel Signed-off-by: Christoph Hellwig --- arch/x86/kernel/pci-dma.c | 2 +- include/linux/device.h | 6 +++--- kernel/dma/direct.c | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index ab5d9dd668d2..80f9fe8d27d0 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -175,7 +175,7 @@ rootfs_initcall(pci_iommu_init); static int via_no_dac_cb(struct pci_dev *pdev, void *data) { - pdev->dev.dma_32bit_limit = true; + pdev->dev.bus_dma_mask = DMA_BIT_MASK(32); return 0; } diff --git a/include/linux/device.h b/include/linux/device.h index 055a69dbcd18..6d3b000be57e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -886,6 +886,8 @@ struct dev_links_info { * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all * hardware supports 64-bit addresses for consistent allocations * such descriptors. + * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA + * limit than the device itself supports. * @dma_pfn_offset: offset of DMA memory range relatively of RAM * @dma_parms: A low level driver may set these to teach IOMMU code about * segment limitations. @@ -912,8 +914,6 @@ struct dev_links_info { * @offline: Set after successful invocation of bus type's .offline(). * @of_node_reused: Set if the device-tree node is shared with an ancestor * device. - * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself - * indicates support for a higher limit in the dma_mask field. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information @@ -967,6 +967,7 @@ struct device { not all hardware supports 64 bit addresses for consistent allocations such descriptors. */ + u64 bus_dma_mask; /* upstream dma_mask constraint */ unsigned long dma_pfn_offset; struct device_dma_parameters *dma_parms; @@ -1002,7 +1003,6 @@ struct device { bool offline_disabled:1; bool offline:1; bool of_node_reused:1; - bool dma_32bit_limit:1; }; static inline struct device *kobj_to_dev(struct kobject *kobj) diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 8be8106270c2..c2860c5a9e96 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -180,10 +180,10 @@ int dma_direct_supported(struct device *dev, u64 mask) return 0; #endif /* - * Various PCI/PCIe bridges have broken support for > 32bit DMA even - * if the device itself might support it. + * Upstream PCI/PCIe bridges or SoC interconnects may not carry + * as many DMA address bits as the device itself supports. */ - if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32)) + if (dev->bus_dma_mask && mask > dev->bus_dma_mask) return 0; return 1; } -- cgit v1.2.3-59-g8ed1b From b2fcb677d4dd2aac3202365719733452b5512719 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Apr 2018 17:26:38 +0200 Subject: sh: simplify get_arch_dma_ops Remove the indirection through the dma_ops variable, and just return nommu_dma_ops directly from get_arch_dma_ops. 
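For context, a paraphrase (treat the exact wording as an assumption, though it matches the mainline header of this era) of how the DMA core consumes the per-arch hook; with no runtime dma_ops variable left on sh, returning nommu_dma_ops directly from get_arch_dma_ops() is all that is needed:

/* roughly include/linux/dma-mapping.h at the time of this series */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}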
Signed-off-by: Christoph Hellwig Acked-by: Yoshinori Sato --- arch/sh/include/asm/dma-mapping.h | 5 ++--- arch/sh/kernel/dma-nommu.c | 8 +------- arch/sh/mm/consistent.c | 3 --- arch/sh/mm/init.c | 10 ---------- 4 files changed, 3 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 41167931e5d9..149e71f95be7 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -2,12 +2,11 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -extern const struct dma_map_ops *dma_ops; -extern void no_iommu_init(void); +extern const struct dma_map_ops nommu_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - return dma_ops; + return &nommu_dma_ops; } extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 3e3a32fc676e..79a9edafa5b0 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -79,10 +79,4 @@ const struct dma_map_ops nommu_dma_ops = { .sync_sg_for_device = nommu_sync_sg_for_device, #endif }; - -void __init no_iommu_init(void) -{ - if (dma_ops) - return; - dma_ops = &nommu_dma_ops; -} +EXPORT_SYMBOL(nommu_dma_ops); diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index fceb2adfcac7..e9d422bd42a5 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -20,9 +20,6 @@ #include #include -const struct dma_map_ops *dma_ops; -EXPORT_SYMBOL(dma_ops); - void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 4034035fbede..7713c084d040 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -339,22 +339,12 @@ void __init paging_init(void) free_area_init_nodes(max_zone_pfns); } -/* - * Early initialization for any I/O MMUs we might have. - */ -static void __init iommu_init(void) -{ - no_iommu_init(); -} - unsigned int mem_init_done = 0; void __init mem_init(void) { pg_data_t *pgdat; - iommu_init(); - high_memory = NULL; for_each_online_pgdat(pgdat) high_memory = max_t(void *, high_memory, -- cgit v1.2.3-59-g8ed1b From 47fcae0d2a5fc77123fc14b0db9fe0025a1a829a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 18 Apr 2018 08:53:46 +0200 Subject: sh: introduce a sh_cacheop_vaddr helper And use it in the maple bus code to avoid a dma API dependency. 
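A hypothetical caller for reference (flush_tx_buffer() is an invented name; sh_cacheop_vaddr() and the cache primitive are the real sh ones), mirroring what the maple bus change below does so that the driver no longer needs the DMA API just to maintain its buffers:

#include <asm/cacheflush.h>

/* Write back and invalidate a buffer the device will both read and write. */
static void flush_tx_buffer(void *buf, size_t len)
{
	__flush_purge_region(sh_cacheop_vaddr(buf), len);
}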
Signed-off-by: Christoph Hellwig Acked-by: Yoshinori Sato --- arch/sh/include/asm/cacheflush.h | 7 +++++++ arch/sh/mm/consistent.c | 6 +----- drivers/sh/maple/maple.c | 7 ++++--- 3 files changed, 12 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index d103ab5a4e4b..b932e42ef028 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -101,5 +101,12 @@ void kunmap_coherent(void *kvaddr); void cpu_cache_init(void); +static inline void *sh_cacheop_vaddr(void *vaddr) +{ + if (__in_29bit_mode()) + vaddr = (void *)CAC_ADDR((unsigned long)vaddr); + return vaddr; +} + #endif /* __KERNEL__ */ #endif /* __ASM_SH_CACHEFLUSH_H */ diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index e9d422bd42a5..1622ae6b9dbd 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -74,10 +74,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void sh_sync_dma_for_device(void *vaddr, size_t size, enum dma_data_direction direction) { - void *addr; - - addr = __in_29bit_mode() ? - (void *)CAC_ADDR((unsigned long)vaddr) : vaddr; + void *addr = sh_cacheop_vaddr(vaddr); switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ @@ -93,7 +90,6 @@ void sh_sync_dma_for_device(void *vaddr, size_t size, BUG(); } } -EXPORT_SYMBOL(sh_sync_dma_for_device); static int __init memchunk_setup(char *str) { diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 2e45988d1259..e5d7fb81ad66 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -300,8 +300,8 @@ static void maple_send(void) mutex_unlock(&maple_wlist_lock); if (maple_packets > 0) { for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) - sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE, - PAGE_SIZE, DMA_BIDIRECTIONAL); + __flush_purge_region(maple_sendbuf + i * PAGE_SIZE, + PAGE_SIZE); } finish: @@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work) list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { mdev = mq->dev; recvbuf = mq->recvbuf->buf; - sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE); + __flush_invalidate_region(sh_cacheop_vaddr(recvbuf), + 0x400); code = recvbuf[0]; kfree(mq->sendbuf); list_del_init(&mq->list); -- cgit v1.2.3-59-g8ed1b From a602915f5d0d9eff96a1d85b6f81e4921b52edfe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 17 Apr 2018 22:02:10 +0200 Subject: sh: use dma_direct_ops for the CONFIG_DMA_COHERENT case This is a slight change in behavior as we avoid the detour through the virtual mapping for the coherent allocator, but if this CPU really is coherent that should be the right thing to do. 
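For illustration only, a compressed and hypothetical rendition of the two coherent-allocation paths being swapped here (the *_sketch functions are not real kernel symbols; error paths and zone selection are omitted). The old sh path always detoured through an uncached ioremap alias, while dma_direct_alloc() on a cache-coherent CPU can simply hand back the cached linear-map address:

/* Sketch of the old detour, condensed from dma_generic_alloc_coherent(). */
static void *sh_coherent_alloc_sketch(struct device *dev, size_t size,
				      dma_addr_t *handle, gfp_t gfp)
{
	void *cached = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));

	if (!cached)
		return NULL;
	*handle = virt_to_phys(cached);
	/* remap uncached even when the CPU is coherent */
	return (void __force *)ioremap_nocache(*handle, size);
}

/* Sketch of what dma_direct_alloc() effectively does on a coherent CPU. */
static void *dma_direct_alloc_sketch(struct device *dev, size_t size,
				     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp | __GFP_ZERO, get_order(size));

	if (!page)
		return NULL;
	*handle = phys_to_dma(dev, page_to_phys(page));
	return page_address(page);	/* cached mapping, no ioremap detour */
}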
Signed-off-by: Christoph Hellwig Acked-by: Yoshinori Sato --- arch/sh/Kconfig | 1 + arch/sh/include/asm/dma-mapping.h | 4 ++++ arch/sh/kernel/Makefile | 4 ++-- arch/sh/kernel/dma-nommu.c | 4 ---- 4 files changed, 7 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index dd4f3d3e644f..c9993a0cdc7e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -159,6 +159,7 @@ config SWAP_IO_SPACE bool config DMA_COHERENT + select DMA_DIRECT_OPS bool config DMA_NONCOHERENT diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index 149e71f95be7..1ebc6a4eb1c5 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -6,7 +6,11 @@ extern const struct dma_map_ops nommu_dma_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { +#ifdef CONFIG_DMA_NONCOHERENT return &nommu_dma_ops; +#else + return &dma_direct_ops; +#endif } extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index dc80041f7363..cb5f1bfb52de 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -12,7 +12,7 @@ endif CFLAGS_REMOVE_return_address.o = -pg -obj-y := debugtraps.o dma-nommu.o dumpstack.o \ +obj-y := debugtraps.o dumpstack.o \ idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \ machvec.o nmi_debug.o process.o \ process_$(BITS).o ptrace.o ptrace_$(BITS).o \ @@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o - +obj-$(CONFIG_DMA_NONCOHERENT) += dma-nommu.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o ccflags-y := -Werror diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 79a9edafa5b0..d8689b1cb743 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -51,7 +51,6 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg, return nents; } -#ifdef CONFIG_DMA_NONCOHERENT static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { @@ -67,16 +66,13 @@ static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, for_each_sg(sg, s, nelems, i) sh_sync_dma_for_device(sg_virt(s), s->length, dir); } -#endif const struct dma_map_ops nommu_dma_ops = { .alloc = dma_generic_alloc_coherent, .free = dma_generic_free_coherent, .map_page = nommu_map_page, .map_sg = nommu_map_sg, -#ifdef CONFIG_DMA_NONCOHERENT .sync_single_for_device = nommu_sync_single_for_device, .sync_sg_for_device = nommu_sync_sg_for_device, -#endif }; EXPORT_SYMBOL(nommu_dma_ops); -- cgit v1.2.3-59-g8ed1b From 46bcde94cd02283535cb719666399f1c4cfb8f22 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 18 Jul 2018 06:55:20 -0700 Subject: sh: split arch/sh/mm/consistent.c Half of the file just contains platform device memory setup code which is required for all builds, and half contains helpers for dma coherent allocation, which is only needed if CONFIG_DMA_NONCOHERENT is enabled. 
Signed-off-by: Christoph Hellwig Acked-by: Yoshinori Sato --- arch/sh/kernel/Makefile | 2 +- arch/sh/kernel/dma-coherent.c | 84 +++++++++++++++++++++++++++++++++++++++++++ arch/sh/mm/consistent.c | 80 ----------------------------------------- 3 files changed, 85 insertions(+), 81 deletions(-) create mode 100644 arch/sh/kernel/dma-coherent.c (limited to 'arch') diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index cb5f1bfb52de..d5ddb64bfffe 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o -obj-$(CONFIG_DMA_NONCOHERENT) += dma-nommu.o +obj-$(CONFIG_DMA_NONCOHERENT) += dma-nommu.o dma-coherent.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o ccflags-y := -Werror diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c new file mode 100644 index 000000000000..2518065d5d27 --- /dev/null +++ b/arch/sh/kernel/dma-coherent.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2004 - 2007 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include + +void *dma_generic_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *ret, *ret_nocache; + int order = get_order(size); + + gfp |= __GFP_ZERO; + + ret = (void *)__get_free_pages(gfp, order); + if (!ret) + return NULL; + + /* + * Pages from the page allocator may have data present in + * cache. So flush the cache before using uncached memory. + */ + sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL); + + ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); + if (!ret_nocache) { + free_pages((unsigned long)ret, order); + return NULL; + } + + split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); + + *dma_handle = virt_to_phys(ret); + if (!WARN_ON(!dev)) + *dma_handle -= PFN_PHYS(dev->dma_pfn_offset); + + return ret_nocache; +} + +void dma_generic_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + unsigned long attrs) +{ + int order = get_order(size); + unsigned long pfn = (dma_handle >> PAGE_SHIFT); + int k; + + if (!WARN_ON(!dev)) + pfn += dev->dma_pfn_offset; + + for (k = 0; k < (1 << order); k++) + __free_pages(pfn_to_page(pfn + k), 0); + + iounmap(vaddr); +} + +void sh_sync_dma_for_device(void *vaddr, size_t size, + enum dma_data_direction direction) +{ + void *addr = sh_cacheop_vaddr(vaddr); + + switch (direction) { + case DMA_FROM_DEVICE: /* invalidate only */ + __flush_invalidate_region(addr, size); + break; + case DMA_TO_DEVICE: /* writeback only */ + __flush_wback_region(addr, size); + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + __flush_purge_region(addr, size); + break; + default: + BUG(); + } +} diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 1622ae6b9dbd..792f36129062 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -1,10 +1,6 @@ /* - * arch/sh/mm/consistent.c - * * Copyright (C) 2004 - 2007 Paul Mundt * - * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c - * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive * for more details. @@ -13,83 +9,7 @@ #include #include #include -#include #include -#include -#include -#include -#include - -void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs) -{ - void *ret, *ret_nocache; - int order = get_order(size); - - gfp |= __GFP_ZERO; - - ret = (void *)__get_free_pages(gfp, order); - if (!ret) - return NULL; - - /* - * Pages from the page allocator may have data present in - * cache. So flush the cache before using uncached memory. - */ - sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL); - - ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); - if (!ret_nocache) { - free_pages((unsigned long)ret, order); - return NULL; - } - - split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); - - *dma_handle = virt_to_phys(ret); - if (!WARN_ON(!dev)) - *dma_handle -= PFN_PHYS(dev->dma_pfn_offset); - - return ret_nocache; -} - -void dma_generic_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) -{ - int order = get_order(size); - unsigned long pfn = dma_handle >> PAGE_SHIFT; - int k; - - if (!WARN_ON(!dev)) - pfn += dev->dma_pfn_offset; - - for (k = 0; k < (1 << order); k++) - __free_pages(pfn_to_page(pfn + k), 0); - - iounmap(vaddr); -} - -void sh_sync_dma_for_device(void *vaddr, size_t size, - enum dma_data_direction direction) -{ - void *addr = sh_cacheop_vaddr(vaddr); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - __flush_invalidate_region(addr, size); - break; - case DMA_TO_DEVICE: /* writeback only */ - __flush_wback_region(addr, size); - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - __flush_purge_region(addr, size); - break; - default: - BUG(); - } -} static int __init memchunk_setup(char *str) { -- cgit v1.2.3-59-g8ed1b From 6fa1d28e38cfa0c90f8d64c0cab37769e5337b85 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 11 May 2018 08:36:53 +0200 Subject: sh: use generic dma_noncoherent_ops Switch to the generic noncoherent direct mapping implementation. 
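For reference, the hook surface an architecture exposes once it selects DMA_NONCOHERENT_OPS; these prototypes are the ones implemented for nios2 and sh in this series and were declared in <linux/dma-noncoherent.h> at the time (treat the header name as an assumption if reading this against a much newer tree):

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs);
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
/* only with ARCH_HAS_SYNC_DMA_FOR_CPU (nios2 selects it, sh does not) */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);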
Signed-off-by: Christoph Hellwig Acked-by: Yoshinori Sato --- arch/sh/Kconfig | 3 +- arch/sh/include/asm/Kbuild | 1 + arch/sh/include/asm/dma-mapping.h | 26 ------------- arch/sh/kernel/Makefile | 2 +- arch/sh/kernel/dma-coherent.c | 23 ++++++------ arch/sh/kernel/dma-nommu.c | 78 --------------------------------------- 6 files changed, 15 insertions(+), 118 deletions(-) delete mode 100644 arch/sh/include/asm/dma-mapping.h delete mode 100644 arch/sh/kernel/dma-nommu.c (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index c9993a0cdc7e..da4db4b5359f 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -51,7 +51,6 @@ config SUPERH select HAVE_ARCH_AUDITSYSCALL select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_NMI - select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH help @@ -164,6 +163,8 @@ config DMA_COHERENT config DMA_NONCOHERENT def_bool !DMA_COHERENT + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select DMA_NONCOHERENT_OPS config PGTABLE_LEVELS default 3 if X2TLB diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 46dd82ab2c29..6a5609a55965 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -2,6 +2,7 @@ generic-y += compat.h generic-y += current.h generic-y += delay.h generic-y += div64.h +generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += exec.h generic-y += irq_regs.h diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h deleted file mode 100644 index 1ebc6a4eb1c5..000000000000 --- a/arch/sh/include/asm/dma-mapping.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_SH_DMA_MAPPING_H -#define __ASM_SH_DMA_MAPPING_H - -extern const struct dma_map_ops nommu_dma_ops; - -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ -#ifdef CONFIG_DMA_NONCOHERENT - return &nommu_dma_ops; -#else - return &dma_direct_ops; -#endif -} - -extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag, - unsigned long attrs); -extern void dma_generic_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs); - -void sh_sync_dma_for_device(void *vaddr, size_t size, - enum dma_data_direction dir); - -#endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index d5ddb64bfffe..59673f8a3379 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -45,7 +45,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o -obj-$(CONFIG_DMA_NONCOHERENT) += dma-nommu.o dma-coherent.o +obj-$(CONFIG_DMA_NONCOHERENT) += dma-coherent.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o ccflags-y := -Werror diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c index 2518065d5d27..a0021eef956b 100644 --- a/arch/sh/kernel/dma-coherent.c +++ b/arch/sh/kernel/dma-coherent.c @@ -7,14 +7,13 @@ */ #include #include -#include +#include #include #include #include -void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - unsigned long attrs) +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) { void *ret, *ret_nocache; int order = get_order(size); @@ -29,7 +28,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, * Pages from the page allocator may 
have data present in * cache. So flush the cache before using uncached memory. */ - sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL); + arch_sync_dma_for_device(dev, virt_to_phys(ret), size, + DMA_BIDIRECTIONAL); ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); if (!ret_nocache) { @@ -46,9 +46,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, return ret_nocache; } -void dma_generic_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) +void arch_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) { int order = get_order(size); unsigned long pfn = (dma_handle >> PAGE_SHIFT); @@ -63,12 +62,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size, iounmap(vaddr); } -void sh_sync_dma_for_device(void *vaddr, size_t size, - enum dma_data_direction direction) +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) { - void *addr = sh_cacheop_vaddr(vaddr); + void *addr = sh_cacheop_vaddr(phys_to_virt(paddr)); - switch (direction) { + switch (dir) { case DMA_FROM_DEVICE: /* invalidate only */ __flush_invalidate_region(addr, size); break; diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c deleted file mode 100644 index d8689b1cb743..000000000000 --- a/arch/sh/kernel/dma-nommu.c +++ /dev/null @@ -1,78 +0,0 @@ -/* - * DMA mapping support for platforms lacking IOMMUs. - * - * Copyright (C) 2009 Paul Mundt - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include - -static dma_addr_t nommu_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t addr = page_to_phys(page) + offset - - PFN_PHYS(dev->dma_pfn_offset); - - WARN_ON(size == 0); - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - sh_sync_dma_for_device(page_address(page) + offset, size, dir); - - return addr; -} - -static int nommu_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *s; - int i; - - WARN_ON(nents == 0 || sg[0].length == 0); - - for_each_sg(sg, s, nents, i) { - dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset); - - BUG_ON(!sg_page(s)); - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - sh_sync_dma_for_device(sg_virt(s), s->length, dir); - - s->dma_address = sg_phys(s) - offset; - s->dma_length = s->length; - } - - return nents; -} - -static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - sh_sync_dma_for_device(phys_to_virt(addr), size, dir); -} - -static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) -{ - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nelems, i) - sh_sync_dma_for_device(sg_virt(s), s->length, dir); -} - -const struct dma_map_ops nommu_dma_ops = { - .alloc = dma_generic_alloc_coherent, - .free = dma_generic_free_coherent, - .map_page = nommu_map_page, - .map_sg = nommu_map_sg, - .sync_single_for_device = nommu_sync_single_for_device, - .sync_sg_for_device = nommu_sync_sg_for_device, -}; -EXPORT_SYMBOL(nommu_dma_ops); -- cgit v1.2.3-59-g8ed1b
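To close the loop, a hypothetical driver-side view (dev, buf and len are placeholders; rx_one_buffer() is an invented name) of how ordinary streaming-DMA calls end up in the arch hooks wired up by this series:

#include <linux/dma-mapping.h>

static int rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program the device to DMA into addr ... */

	/*
	 * Reaches arch_sync_dma_for_cpu() where the arch selects
	 * ARCH_HAS_SYNC_DMA_FOR_CPU (nios2 above); effectively a no-op otherwise.
	 */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* the CPU may now read buf */

	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}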