From ccf640f4c9988653ef884672381b03b9be247bec Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Aug 2018 09:40:24 +0200 Subject: dma-mapping: remove dma_configure There is no good reason for this indirection given that the method always exists. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- include/linux/dma-mapping.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'include') diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1db6a6b46d0d..1c6c7c09bcf2 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -754,14 +754,8 @@ dma_mark_declared_memory_occupied(struct device *dev, #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ #ifdef CONFIG_HAS_DMA -int dma_configure(struct device *dev); void dma_deconfigure(struct device *dev); #else -static inline int dma_configure(struct device *dev) -{ - return 0; -} - static inline void dma_deconfigure(struct device *dev) {} #endif -- cgit v1.2.3-59-g8ed1b From dc3c05504d38849f77149cb962caeaedd1efa127 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Aug 2018 10:28:18 +0200 Subject: dma-mapping: remove dma_deconfigure This goes through a lot of hooks just to call arch_teardown_dma_ops. Replace it with a direct call instead. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- drivers/acpi/arm64/iort.c | 2 +- drivers/acpi/scan.c | 10 ---------- drivers/base/dd.c | 4 ++-- drivers/of/device.c | 12 ------------ include/acpi/acpi_bus.h | 1 - include/linux/acpi.h | 2 -- include/linux/dma-mapping.h | 6 ------ include/linux/of_device.h | 3 --- kernel/dma/mapping.c | 6 ------ 9 files changed, 3 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 08f26db2da7e..2a361e22d38d 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1428,7 +1428,7 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node, return 0; dma_deconfigure: - acpi_dma_deconfigure(&pdev->dev); + arch_teardown_dma_ops(&pdev->dev); dev_put: platform_device_put(pdev); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index e1b6231cfa1c..56676a56b3e3 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1469,16 +1469,6 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) } EXPORT_SYMBOL_GPL(acpi_dma_configure); -/** - * acpi_dma_deconfigure - Tear-down DMA configuration for the device. 
- * @dev: The pointer to the device - */ -void acpi_dma_deconfigure(struct device *dev) -{ - arch_teardown_dma_ops(dev); -} -EXPORT_SYMBOL_GPL(acpi_dma_deconfigure); - static void acpi_init_coherency(struct acpi_device *adev) { unsigned long long cca = 0; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 65128cf8427c..169412ee4ae8 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -539,7 +539,7 @@ re_probe: goto done; probe_failed: - dma_deconfigure(dev); + arch_teardown_dma_ops(dev); dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, @@ -968,7 +968,7 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); - dma_deconfigure(dev); + arch_teardown_dma_ops(dev); devres_release_all(dev); dev->driver = NULL; diff --git a/drivers/of/device.c b/drivers/of/device.c index 5957cd4fa262..c7fa5a9697c9 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -170,18 +170,6 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) } EXPORT_SYMBOL_GPL(of_dma_configure); -/** - * of_dma_deconfigure - Clean up DMA configuration - * @dev: Device for which to clean up DMA configuration - * - * Clean up all configuration performed by of_dma_configure_ops() and free all - * resources that have been allocated. - */ -void of_dma_deconfigure(struct device *dev) -{ - arch_teardown_dma_ops(dev); -} - int of_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ba4dd54f2c82..53600f527a70 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -595,7 +595,6 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, u64 *size); int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr); -void acpi_dma_deconfigure(struct device *dev); struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index de8d3d3fa651..af4628979d13 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -831,8 +831,6 @@ static inline int acpi_dma_configure(struct device *dev, return 0; } -static inline void acpi_dma_deconfigure(struct device *dev) { } - #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1c6c7c09bcf2..1423b69f3cc9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -753,12 +753,6 @@ dma_mark_declared_memory_occupied(struct device *dev, } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ -#ifdef CONFIG_HAS_DMA -void dma_deconfigure(struct device *dev); -#else -static inline void dma_deconfigure(struct device *dev) {} -#endif - /* * Managed DMA API */ diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 165fd302b442..8d31e39dd564 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -58,7 +58,6 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma); -void of_dma_deconfigure(struct device *dev); #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -113,8 +112,6 @@ static inline int of_dma_configure(struct device *dev, { return 0; } -static inline void 
of_dma_deconfigure(struct device *dev) -{} #endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 25607ceb4a50..3540cb399bd2 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -327,9 +327,3 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) vunmap(cpu_addr); } #endif - -void dma_deconfigure(struct device *dev) -{ - of_dma_deconfigure(dev); - acpi_dma_deconfigure(dev); -} -- cgit v1.2.3-59-g8ed1b From 46053c73685411915d3de50c5a0045beef32806b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Aug 2018 10:29:02 +0200 Subject: dma-mapping: clear dev->dma_ops in arch_teardown_dma_ops There is no reason to leave the per-device dma_ops around when deconfiguring a device, so move this code from arm64 into the common code. Signed-off-by: Christoph Hellwig Reviewed-by: Robin Murphy --- arch/arm64/include/asm/dma-mapping.h | 5 ----- arch/arm64/mm/dma-mapping.c | 5 ----- include/linux/dma-mapping.h | 5 ++++- 3 files changed, 4 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index b7847eb8a7bb..0a2d13332545 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -39,11 +39,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent); #define arch_setup_dma_ops arch_setup_dma_ops -#ifdef CONFIG_IOMMU_DMA -void arch_teardown_dma_ops(struct device *dev); -#define arch_teardown_dma_ops arch_teardown_dma_ops -#endif - /* do not use this function in a driver */ static inline bool is_device_dma_coherent(struct device *dev) { diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 072c51fb07d7..cdcb73db9ea2 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -862,11 +862,6 @@ out_err: dev_name(dev)); } -void arch_teardown_dma_ops(struct device *dev) -{ - dev->dma_ops = NULL; -} - #else static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1423b69f3cc9..eafd6f318e78 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -664,7 +664,10 @@ static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, #endif #ifndef arch_teardown_dma_ops -static inline void arch_teardown_dma_ops(struct device *dev) { } +static inline void arch_teardown_dma_ops(struct device *dev) +{ + dev->dma_ops = NULL; +} #endif static inline unsigned int dma_get_max_seg_size(struct device *dev) -- cgit v1.2.3-59-g8ed1b From f3ecc0ff0457eae93503792c6fc35921fa8a6204 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Aug 2018 14:53:20 +0200 Subject: dma-mapping: move the dma_coherent flag to struct device Various architectures support both coherent and non-coherent dma on a per-device basis. Move the dma_coherent flag from the mips archdata field to struct device proper to prepare the infrastructure for reuse on other architectures.
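To illustrate the intended use of the per-device flag (a sketch only, not part of this patch), an architecture's sync hook can key cache maintenance off the dev_is_dma_coherent() helper added below; dma_cache_wback() stands in for an architecture's cache-writeback primitive and is hypothetical here:

#include <linux/dma-noncoherent.h>

/* Sketch: an arch sync hook keyed off the new per-device flag. */
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* coherent devices need no cache maintenance at all */
	if (dev_is_dma_coherent(dev))
		return;
	dma_cache_wback(paddr, size);	/* hypothetical arch primitive */
}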
Signed-off-by: Christoph Hellwig Acked-by: Paul Burton Acked-by: Greg Kroah-Hartman --- arch/mips/Kconfig | 1 + arch/mips/include/asm/Kbuild | 1 + arch/mips/include/asm/device.h | 19 ------------------- arch/mips/include/asm/dma-coherence.h | 6 ++++++ arch/mips/include/asm/dma-mapping.h | 2 +- arch/mips/mm/dma-noncoherent.c | 32 ++++++-------------------------- include/linux/device.h | 7 +++++++ include/linux/dma-noncoherent.h | 16 ++++++++++++++++ kernel/dma/Kconfig | 3 +++ 9 files changed, 41 insertions(+), 46 deletions(-) delete mode 100644 arch/mips/include/asm/device.h (limited to 'include') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 0b25180028b8..54c52bd0d9d3 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1106,6 +1106,7 @@ config ARCH_SUPPORTS_UPROBES bool config DMA_MAYBE_COHERENT + select ARCH_HAS_DMA_COHERENCE_H select DMA_NONCOHERENT bool diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 58351e48421e..9a81e72119da 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,6 +1,7 @@ # MIPS headers generic-(CONFIG_GENERIC_CSUM) += checksum.h generic-y += current.h +generic-y += device.h generic-y += dma-contiguous.h generic-y += emergency-restart.h generic-y += export.h diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h deleted file mode 100644 index 6aa796f1081a..000000000000 --- a/arch/mips/include/asm/device.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#ifndef _ASM_MIPS_DEVICE_H -#define _ASM_MIPS_DEVICE_H - -struct dev_archdata { -#ifdef CONFIG_DMA_PERDEV_COHERENT - /* Non-zero if DMA is coherent with CPU caches */ - bool dma_coherent; -#endif -}; - -struct pdev_archdata { -}; - -#endif /* _ASM_MIPS_DEVICE_H*/ diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h index 8eda48748ed5..5eaa1fcc878a 100644 --- a/arch/mips/include/asm/dma-coherence.h +++ b/arch/mips/include/asm/dma-coherence.h @@ -20,6 +20,12 @@ enum coherent_io_user_state { #elif defined(CONFIG_DMA_MAYBE_COHERENT) extern enum coherent_io_user_state coherentio; extern int hw_coherentio; + +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return coherentio == IO_COHERENCE_ENABLED || + (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio); +} #else #ifdef CONFIG_DMA_NONCOHERENT #define coherentio IO_COHERENCE_DISABLED diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index e81c4e97ff1a..40d825c779de 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -25,7 +25,7 @@ static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, bool coherent) { #ifdef CONFIG_DMA_PERDEV_COHERENT - dev->archdata.dma_coherent = coherent; + dev->dma_coherent = coherent; #endif } diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 2aca1236af36..d408ac51f56c 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -14,26 +14,6 @@ #include #include -#ifdef CONFIG_DMA_PERDEV_COHERENT -static inline int dev_is_coherent(struct device *dev) -{ - return dev->archdata.dma_coherent; -} -#else -static inline int dev_is_coherent(struct device *dev) -{ - switch (coherentio) { - default: - case IO_COHERENCE_DEFAULT: - return hw_coherentio; - case IO_COHERENCE_ENABLED: - return 1; - case IO_COHERENCE_DISABLED: - return 0; - } -} -#endif /* CONFIG_DMA_PERDEV_COHERENT */ 
- /* * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively * fill random cachelines with stale data at any time, requiring an extra @@ -49,7 +29,7 @@ static inline int dev_is_coherent(struct device *dev) */ static inline bool cpu_needs_post_dma_flush(struct device *dev) { - if (dev_is_coherent(dev)) + if (dev_is_dma_coherent(dev)) return false; switch (boot_cpu_type()) { @@ -76,7 +56,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, if (!ret) return NULL; - if (!dev_is_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) { + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) { dma_cache_wback_inv((unsigned long) ret, size); ret = (void *)UNCAC_ADDR(ret); } @@ -87,7 +67,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_coherent(dev)) + if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_dma_coherent(dev)) cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr); dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); } @@ -103,7 +83,7 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, unsigned long pfn; int ret = -ENXIO; - if (!dev_is_coherent(dev)) + if (!dev_is_dma_coherent(dev)) addr = CAC_ADDR(addr); pfn = page_to_pfn(virt_to_page((void *)addr)); @@ -187,7 +167,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - if (!dev_is_coherent(dev)) + if (!dev_is_dma_coherent(dev)) dma_sync_phys(paddr, size, dir); } @@ -203,6 +183,6 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, { BUG_ON(direction == DMA_NONE); - if (!dev_is_coherent(dev)) + if (!dev_is_dma_coherent(dev)) dma_sync_virt(vaddr, size, direction); } diff --git a/include/linux/device.h b/include/linux/device.h index 8f882549edee..983506789402 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -927,6 +927,8 @@ struct dev_links_info { * @offline: Set after successful invocation of bus type's .offline(). * @of_node_reused: Set if the device-tree node is shared with an ancestor * device. + * @dma_coherent: this particular device is dma coherent, even if the + * architecture supports non-coherent devices. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. 
The device structure contains the information @@ -1016,6 +1018,11 @@ struct device { bool offline_disabled:1; bool offline:1; bool of_node_reused:1; +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) + bool dma_coherent:1; +#endif }; static inline struct device *kobj_to_dev(struct kobject *kobj) diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index a0aa00cc909d..ce9732506ef4 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -4,6 +4,22 @@ #include +#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H +#include +#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return dev->dma_coherent; +} +#else +static inline bool dev_is_dma_coherent(struct device *dev) +{ + return true; +} +#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ + void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 1b1d63b3634b..79476749f196 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -13,6 +13,9 @@ config NEED_DMA_MAP_STATE config ARCH_DMA_ADDR_T_64BIT def_bool 64BIT || PHYS_ADDR_T_64BIT +config ARCH_HAS_DMA_COHERENCE_H + bool + config HAVE_GENERIC_DMA_COHERENT bool -- cgit v1.2.3-59-g8ed1b From bc3ec75de5452db59b683487867ba562b950708a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 8 Sep 2018 11:22:43 +0200 Subject: dma-mapping: merge direct and noncoherent ops All the cache maintenance is already stubbed out when not enabled, but merging the two allows us to nicely handle the case where cache maintenance is required for some devices, but not others.
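The merged behaviour is easiest to see in the allocator; reassembled from the kernel/dma/direct.c hunks below into a readable sketch (no additional change):

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	/* non-coherent devices go through the arch hooks, which handle
	 * cache maintenance and remapping; coherent devices take the
	 * plain page-allocation path */
	if (!dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}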
Signed-off-by: Christoph Hellwig Acked-by: Paul Burton # MIPS parts --- arch/arc/Kconfig | 2 +- arch/arc/mm/dma.c | 16 ++--- arch/arm/mm/dma-mapping-nommu.c | 5 +- arch/c6x/Kconfig | 2 +- arch/hexagon/Kconfig | 2 +- arch/m68k/Kconfig | 2 +- arch/microblaze/Kconfig | 2 +- arch/mips/Kconfig | 1 - arch/mips/include/asm/dma-mapping.h | 2 - arch/mips/jazz/jazzdma.c | 6 +- arch/mips/mm/dma-noncoherent.c | 29 +++------ arch/nds32/Kconfig | 2 +- arch/nios2/Kconfig | 2 +- arch/openrisc/Kconfig | 2 +- arch/parisc/Kconfig | 2 +- arch/parisc/kernel/setup.c | 2 +- arch/sh/Kconfig | 3 +- arch/sparc/Kconfig | 2 +- arch/sparc/include/asm/dma-mapping.h | 4 +- arch/x86/kernel/amd_gart_64.c | 6 +- arch/xtensa/Kconfig | 2 +- include/asm-generic/dma-mapping.h | 9 --- include/linux/dma-direct.h | 4 ++ include/linux/dma-mapping.h | 1 - include/linux/dma-noncoherent.h | 5 -- kernel/dma/Kconfig | 9 +-- kernel/dma/Makefile | 1 - kernel/dma/direct.c | 121 +++++++++++++++++++++++++++++++++-- kernel/dma/noncoherent.c | 106 ------------------------------ 29 files changed, 160 insertions(+), 192 deletions(-) delete mode 100644 kernel/dma/noncoherent.c (limited to 'include') diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index b4441b0764d7..ca03694d518a 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -17,7 +17,7 @@ config ARC select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index c75d5c3470e3..535ed4a068ef 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -167,7 +167,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, } /* - * Plug in coherent or noncoherent dma ops + * Plug in direct dma map ops. */ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) @@ -175,13 +175,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, /* * IOC hardware snoops all DMA traffic keeping the caches consistent * with memory - eliding need for any explicit cache maintenance of - * DMA buffers - so we can use dma_direct cache ops. + * DMA buffers. */ - if (is_isa_arcv2() && ioc_enable && coherent) { - set_dma_ops(dev, &dma_direct_ops); - dev_info(dev, "use dma_direct_ops cache ops\n"); - } else { - set_dma_ops(dev, &dma_noncoherent_ops); - dev_info(dev, "use dma_noncoherent_ops cache ops\n"); - } + if (is_isa_arcv2() && ioc_enable && coherent) + dev->dma_coherent = true; + + dev_info(dev, "use %sncoherent DMA ops\n", + dev->dma_coherent ? 
"" : "non"); } diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index aa7aba302e76..0ad156f9985b 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -47,7 +47,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, */ if (attrs & DMA_ATTR_NON_CONSISTENT) - return dma_direct_alloc(dev, size, dma_handle, gfp, attrs); + return dma_direct_alloc_pages(dev, size, dma_handle, gfp, + attrs); ret = dma_alloc_from_global_coherent(size, dma_handle); @@ -70,7 +71,7 @@ static void arm_nommu_dma_free(struct device *dev, size_t size, unsigned long attrs) { if (attrs & DMA_ATTR_NON_CONSISTENT) { - dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } else { int ret = dma_release_from_global_coherent(get_order(size), cpu_addr); diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index a641b0bf1611..f65a084607fd 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -9,7 +9,7 @@ config C6X select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select CLKDEV_LOOKUP - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 6cee842a9b44..3ef46522e89f 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -30,7 +30,7 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 070553791e97..c7b2a8d60a41 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -26,7 +26,7 @@ config M68K select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 select OLD_SIGACTION - select DMA_NONCOHERENT_OPS if HAS_DMA + select DMA_DIRECT_OPS if HAS_DMA select HAVE_MEMBLOCK select ARCH_DISCARD_MEMBLOCK select NO_BOOTMEM diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index ace5c5bf1836..0f48ab6a8070 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -11,7 +11,7 @@ config MICROBLAZE select TIMER_OF select CLONE_BACKWARDS3 select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 54c52bd0d9d3..96da6e3396e1 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1121,7 +1121,6 @@ config DMA_NONCOHERENT select NEED_DMA_MAP_STATE select DMA_NONCOHERENT_MMAP select DMA_NONCOHERENT_CACHE_SYNC - select DMA_NONCOHERENT_OPS config SYS_HAS_EARLY_PRINTK bool diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 40d825c779de..b4c477eb46ce 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -12,8 +12,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return &jazz_dma_ops; #elif defined(CONFIG_SWIOTLB) return &swiotlb_dma_ops; -#elif defined(CONFIG_DMA_NONCOHERENT_OPS) - return &dma_noncoherent_ops; #else return &dma_direct_ops; #endif diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index d31bc2f01208..bb49dfa1a9a3 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -564,13 +564,13 @@ static void *jazz_dma_alloc(struct device *dev, 
size_t size, { void *ret; - ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); + ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); if (!ret) return NULL; *dma_handle = vdma_alloc(virt_to_phys(ret), size); if (*dma_handle == VDMA_ERROR) { - dma_direct_free(dev, size, ret, *dma_handle, attrs); + dma_direct_free_pages(dev, size, ret, *dma_handle, attrs); return NULL; } @@ -587,7 +587,7 @@ static void jazz_dma_free(struct device *dev, size_t size, void *vaddr, vdma_free(dma_handle); if (!(attrs & DMA_ATTR_NON_CONSISTENT)) vaddr = (void *)CAC_ADDR((unsigned long)vaddr); - return dma_direct_free(dev, size, vaddr, dma_handle, attrs); + dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs); } static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page, diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index d408ac51f56c..b01b9a3e424f 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -29,9 +29,6 @@ */ static inline bool cpu_needs_post_dma_flush(struct device *dev) { - if (dev_is_dma_coherent(dev)) - return false; - switch (boot_cpu_type()) { case CPU_R10000: case CPU_R12000: @@ -52,11 +49,8 @@ void *arch_dma_alloc(struct device *dev, size_t size, { void *ret; - ret = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); - if (!ret) - return NULL; - - if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) { + ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); + if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) { dma_cache_wback_inv((unsigned long) ret, size); ret = (void *)UNCAC_ADDR(ret); } @@ -67,9 +61,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !dev_is_dma_coherent(dev)) + if (!(attrs & DMA_ATTR_NON_CONSISTENT)) cpu_addr = (void *)CAC_ADDR((unsigned long)cpu_addr); - dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, @@ -78,16 +72,11 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, { unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long addr = (unsigned long)cpu_addr; + unsigned long addr = CAC_ADDR((unsigned long)cpu_addr); unsigned long off = vma->vm_pgoff; - unsigned long pfn; + unsigned long pfn = page_to_pfn(virt_to_page((void *)addr)); int ret = -ENXIO; - if (!dev_is_dma_coherent(dev)) - addr = CAC_ADDR(addr); - - pfn = page_to_pfn(virt_to_page((void *)addr)); - if (attrs & DMA_ATTR_WRITE_COMBINE) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); else @@ -167,8 +156,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { - if (!dev_is_dma_coherent(dev)) - dma_sync_phys(paddr, size, dir); + dma_sync_phys(paddr, size, dir); } void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, @@ -183,6 +171,5 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, { BUG_ON(direction == DMA_NONE); - if (!dev_is_dma_coherent(dev)) - dma_sync_virt(vaddr, size, direction); + dma_sync_virt(vaddr, size, direction); } diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 7068f341133d..56992330026a 100644 --- a/arch/nds32/Kconfig +++ 
b/arch/nds32/Kconfig @@ -11,7 +11,7 @@ config NDS32 select CLKSRC_MMIO select CLONE_BACKWARDS select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CPU_DEVICES select GENERIC_CLOCKEVENTS diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index f4ad1138e6b9..03965692fbfe 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -4,7 +4,7 @@ config NIOS2 select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_SWAP - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select TIMER_OF select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index e0081e734827..a655ae280637 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -7,7 +7,7 @@ config OPENRISC def_bool y select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select OF select OF_EARLY_FLATTREE select IRQ_DOMAIN diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 8e6d83f79e72..f1cd12afd943 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -186,7 +186,7 @@ config PA11 depends on PA7000 || PA7100LC || PA7200 || PA7300LC select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select DMA_NONCOHERENT_CACHE_SYNC config PREFETCH diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 4e87c35c22b7..755e89ec828a 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -102,7 +102,7 @@ void __init dma_ops_init(void) case pcxl: /* falls through */ case pcxs: case pcxt: - hppa_dma_ops = &dma_noncoherent_ops; + hppa_dma_ops = &dma_direct_ops; break; default: break; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 1fb7b6d72baf..475d786a65b0 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -7,6 +7,7 @@ config SUPERH select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_PATA_PLATFORM select CLKDEV_LOOKUP + select DMA_DIRECT_OPS select HAVE_IDE if HAS_IOPORT_MAP select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP @@ -158,13 +159,11 @@ config SWAP_IO_SPACE bool config DMA_COHERENT - select DMA_DIRECT_OPS bool config DMA_NONCOHERENT def_bool !DMA_COHERENT select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select DMA_NONCOHERENT_OPS config PGTABLE_LEVELS default 3 if X2TLB diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index e6f2a38d2e61..7e2aa59fcc29 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -51,7 +51,7 @@ config SPARC config SPARC32 def_bool !64BIT select ARCH_HAS_SYNC_DMA_FOR_CPU - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select CLZ_TAB select HAVE_UID16 diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h index e17566376934..b0bb2fcaf1c9 100644 --- a/arch/sparc/include/asm/dma-mapping.h +++ b/arch/sparc/include/asm/dma-mapping.h @@ -14,11 +14,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { #ifdef CONFIG_SPARC_LEON if (sparc_cpu_model == sparc_leon) - return &dma_noncoherent_ops; + return &dma_direct_ops; #endif #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) if (bus == &pci_bus_type) - return &dma_noncoherent_ops; + return &dma_direct_ops; #endif return dma_ops; } diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index f299d8a479bb..3f9d1b4019bb 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -482,7 +482,7 @@ gart_alloc_coherent(struct device *dev, size_t 
size, dma_addr_t *dma_addr, { void *vaddr; - vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs); + vaddr = dma_direct_alloc_pages(dev, size, dma_addr, flag, attrs); if (!vaddr || !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24)) return vaddr; @@ -494,7 +494,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, goto out_free; return vaddr; out_free: - dma_direct_free(dev, size, vaddr, *dma_addr, attrs); + dma_direct_free_pages(dev, size, vaddr, *dma_addr, attrs); return NULL; } @@ -504,7 +504,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, unsigned long attrs) { gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); - dma_direct_free(dev, size, vaddr, dma_addr, attrs); + dma_direct_free_pages(dev, size, vaddr, dma_addr, attrs); } static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 04d038f3b6fa..516694937b7a 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -12,7 +12,7 @@ config XTENSA select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK - select DMA_NONCOHERENT_OPS + select DMA_DIRECT_OPS select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_IRQ_SHOW diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index ad2868263867..880a292d792f 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h @@ -4,16 +4,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - /* - * Use the non-coherent ops if available. If an architecture wants a - * more fine-grained selection of operations it will have to implement - * get_arch_dma_ops itself or use the per-device dma_ops. - */ -#ifdef CONFIG_DMA_NONCOHERENT_OPS - return &dma_noncoherent_ops; -#else return &dma_direct_ops; -#endif } #endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 8d9f33febde5..86a59ba5a7f3 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -59,6 +59,10 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); +void *dma_direct_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); +void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_addr, unsigned long attrs); dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index eafd6f318e78..8f2001181cd1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -136,7 +136,6 @@ struct dma_map_ops { }; extern const struct dma_map_ops dma_direct_ops; -extern const struct dma_map_ops dma_noncoherent_ops; extern const struct dma_map_ops dma_virt_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index ce9732506ef4..3f503025a0cd 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -24,14 +24,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); - -#ifdef CONFIG_DMA_NONCOHERENT_MMAP int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); -#else -#define arch_dma_mmap NULL -#endif /* CONFIG_DMA_NONCOHERENT_MMAP */ #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 79476749f196..5617c9a76208 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -33,18 +33,13 @@ config DMA_DIRECT_OPS bool depends on HAS_DMA -config DMA_NONCOHERENT_OPS - bool - depends on HAS_DMA - select DMA_DIRECT_OPS - config DMA_NONCOHERENT_MMAP bool - depends on DMA_NONCOHERENT_OPS + depends on DMA_DIRECT_OPS config DMA_NONCOHERENT_CACHE_SYNC bool - depends on DMA_NONCOHERENT_OPS + depends on DMA_DIRECT_OPS config DMA_VIRT_OPS bool diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile index 6de44e4eb454..7d581e4eea4a 100644 --- a/kernel/dma/Makefile +++ b/kernel/dma/Makefile @@ -4,7 +4,6 @@ obj-$(CONFIG_HAS_DMA) += mapping.o obj-$(CONFIG_DMA_CMA) += contiguous.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o -obj-$(CONFIG_DMA_NONCOHERENT_OPS) += noncoherent.o obj-$(CONFIG_DMA_VIRT_OPS) += virt.o obj-$(CONFIG_DMA_API_DEBUG) += debug.o obj-$(CONFIG_SWIOTLB) += swiotlb.o diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index de87b0282e74..09e85f6aa4ba 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -1,13 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 /* - * DMA operations that map physical memory directly without using an IOMMU or - * flushing caches. + * Copyright (C) 2018 Christoph Hellwig. + * + * DMA operations that map physical memory directly without using an IOMMU. */ #include #include #include #include #include +#include #include #include @@ -58,8 +60,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) return addr + size - 1 <= dev->coherent_dma_mask; } -void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) +void *dma_direct_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; int page_order = get_order(size); @@ -124,7 +126,7 @@ again: * NOTE: this function must never look at the dma_addr argument, because we want * to be able to use it as a helper for iommu implementations as well. 
*/ -void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, +void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; @@ -136,14 +138,106 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, free_pages((unsigned long)cpu_addr, page_order); } +void *dma_direct_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev)) + return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); + return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); +} + +void dma_direct_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev)) + arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); + else + dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); +} + +static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + if (!dev_is_dma_coherent(dev) && + IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP)) + return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + +static void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + if (dev_is_dma_coherent(dev)) + return; + arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); +} + +static void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + if (dev_is_dma_coherent(dev)) + return; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); +} + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) +static void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + if (dev_is_dma_coherent(dev)) + return; + arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); + arch_sync_dma_for_cpu_all(dev); +} + +static void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + if (dev_is_dma_coherent(dev)) + return; + + for_each_sg(sgl, sg, nents, i) + arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); + arch_sync_dma_for_cpu_all(dev); +} + +static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_single_for_cpu(dev, addr, size, dir); +} + +static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); +} +#endif + dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { - dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset; + phys_addr_t phys = page_to_phys(page) + offset; + dma_addr_t dma_addr = phys_to_dma(dev, phys); if (!check_addr(dev, dma_addr, size, __func__)) return DIRECT_MAPPING_ERROR; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + 
dma_direct_sync_single_for_device(dev, dma_addr, size, dir); return dma_addr; } @@ -162,6 +256,8 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, sg_dma_len(sg) = sg->length; } + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_sg_for_device(dev, sgl, nents, dir); return nents; } @@ -197,9 +293,22 @@ int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc, .free = dma_direct_free, + .mmap = dma_direct_mmap, .map_page = dma_direct_map_page, .map_sg = dma_direct_map_sg, +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_device = dma_direct_sync_sg_for_device, +#endif +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) + .sync_single_for_cpu = dma_direct_sync_single_for_cpu, + .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, + .unmap_page = dma_direct_unmap_page, + .unmap_sg = dma_direct_unmap_sg, +#endif .dma_supported = dma_direct_supported, .mapping_error = dma_direct_mapping_error, + .cache_sync = arch_dma_cache_sync, }; EXPORT_SYMBOL(dma_direct_ops); diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c deleted file mode 100644 index 031fe235d958..000000000000 --- a/kernel/dma/noncoherent.c +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2018 Christoph Hellwig. - * - * DMA operations that map physical memory directly without providing cache - * coherence. - */ -#include -#include -#include -#include -#include - -static void dma_noncoherent_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir); -} - -static void dma_noncoherent_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); -} - -static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t addr; - - addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); - if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - arch_sync_dma_for_device(dev, page_to_phys(page) + offset, - size, dir); - return addr; -} - -static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs); - if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir); - return nents; -} - -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) -static void dma_noncoherent_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ - arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir); - arch_sync_dma_for_cpu_all(dev); -} - -static void dma_noncoherent_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); - arch_sync_dma_for_cpu_all(dev); -} - -static void 
dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir); -} - -static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir); -} -#endif - -const struct dma_map_ops dma_noncoherent_ops = { - .alloc = arch_dma_alloc, - .free = arch_dma_free, - .mmap = arch_dma_mmap, - .sync_single_for_device = dma_noncoherent_sync_single_for_device, - .sync_sg_for_device = dma_noncoherent_sync_sg_for_device, - .map_page = dma_noncoherent_map_page, - .map_sg = dma_noncoherent_map_sg, -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) - .sync_single_for_cpu = dma_noncoherent_sync_single_for_cpu, - .sync_sg_for_cpu = dma_noncoherent_sync_sg_for_cpu, - .unmap_page = dma_noncoherent_unmap_page, - .unmap_sg = dma_noncoherent_unmap_sg, -#endif - .dma_supported = dma_direct_supported, - .mapping_error = dma_direct_mapping_error, - .cache_sync = arch_dma_cache_sync, -}; -EXPORT_SYMBOL(dma_noncoherent_ops); -- cgit v1.2.3-59-g8ed1b From 58b0440663ec11372befb8ead0ee7099d8878590 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 11 Sep 2018 08:55:28 +0200 Subject: dma-mapping: consolidate the dma mmap implementations The only functional difference (modulo a few missing fixes in the arch code) is that architectures without coherent caches need a hook to convert a virtual or dma address into a pfn, given that we don't have the kernel linear mapping available for the otherwise easy virt_to_page call. As a side effect we can support mmap of the per-device coherent area even on architectures not providing the callback, and we make the previously dangerous default method dma_common_mmap actually safe for non-coherent architectures by rejecting it without the right helper. In addition to that we need a hook so that some architectures can override the protection bits when mmapping dma coherent allocations.
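Reassembled from the kernel/dma/mapping.c hunk below, the consolidated helper reduces to a pfn lookup plus remap_pfn_range(); this is a readable sketch with the dev-coherent-pool fallback and range checks trimmed:

int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long pfn;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (!dev_is_dma_coherent(dev)) {
		/* no linear mapping: ask the arch for the pfn */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;	/* no safe way to find the pfn */
		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}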
Signed-off-by: Christoph Hellwig Acked-by: Paul Burton # MIPS parts --- arch/arc/Kconfig | 2 +- arch/arc/mm/dma.c | 25 +++---------------------- arch/arm/mm/dma-mapping-nommu.c | 2 +- arch/microblaze/Kconfig | 2 +- arch/microblaze/include/asm/pgtable.h | 2 -- arch/microblaze/kernel/dma.c | 22 ---------------------- arch/microblaze/mm/consistent.c | 3 ++- arch/mips/Kconfig | 3 ++- arch/mips/jazz/jazzdma.c | 1 - arch/mips/mm/dma-noncoherent.c | 32 +++++++++----------------------- drivers/xen/swiotlb-xen.c | 2 +- include/linux/dma-mapping.h | 5 +++-- include/linux/dma-noncoherent.h | 10 ++++++++-- kernel/dma/Kconfig | 10 ++++++---- kernel/dma/direct.c | 11 ----------- kernel/dma/mapping.c | 32 +++++++++++++++++++++----------- 16 files changed, 58 insertions(+), 106 deletions(-) (limited to 'include') diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index ca03694d518a..3d9bdecfa52d 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -9,6 +9,7 @@ config ARC def_bool y select ARC_TIMERS + select ARCH_HAS_DMA_COHERENT_TO_PFN select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -18,7 +19,6 @@ config ARC select CLONE_BACKWARDS select COMMON_CLK select DMA_DIRECT_OPS - select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC) select GENERIC_CLOCKEVENTS select GENERIC_FIND_FIRST_BIT diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 535ed4a068ef..db203ff69ccf 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -84,29 +84,10 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, __free_pages(page, get_order(size)); } -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) { - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long pfn = __phys_to_pfn(dma_addr); - unsigned long off = vma->vm_pgoff; - int ret = -ENXIO; - - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - if (off < count && user_count <= (count - off)) { - ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); - } - - return ret; + return __phys_to_pfn(dma_addr); } /* diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index 0ad156f9985b..712416ecd8e6 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -91,7 +91,7 @@ static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) return ret; - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 0f48ab6a8070..164a4857737a 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -1,6 +1,7 @@ config MICROBLAZE def_bool y select ARCH_NO_SWAP + select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -12,7 +13,6 @@ config MICROBLAZE select CLONE_BACKWARDS3 select COMMON_CLK select DMA_DIRECT_OPS - select DMA_NONCOHERENT_MMAP select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES diff --git 
a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 7b650ab14fa0..f64ebb9c9a41 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -553,8 +553,6 @@ void __init *early_get_page(void); extern unsigned long ioremap_bot, ioremap_base; -unsigned long consistent_virt_to_pfn(void *vaddr); - void setup_memory(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 71032cf64669..a89c2d4ed5ff 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -42,25 +42,3 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, { __dma_sync(dev, paddr, size, dir); } - -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs) -{ -#ifdef CONFIG_MMU - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long off = vma->vm_pgoff; - unsigned long pfn; - - if (off >= count || user_count > (count - off)) - return -ENXIO; - - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - pfn = consistent_virt_to_pfn(cpu_addr); - return remap_pfn_range(vma, vma->vm_start, pfn + off, - vma->vm_end - vma->vm_start, vma->vm_page_prot); -#else - return -ENXIO; -#endif -} diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index c9a278ac795a..d801cc5f5b95 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -165,7 +165,8 @@ static pte_t *consistent_virt_to_pte(void *vaddr) return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr); } -unsigned long consistent_virt_to_pfn(void *vaddr) +long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr, + dma_addr_t dma_addr) { pte_t *ptep = consistent_virt_to_pte(vaddr); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 96da6e3396e1..77c022e56e6e 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1116,10 +1116,11 @@ config DMA_PERDEV_COHERENT config DMA_NONCOHERENT bool + select ARCH_HAS_DMA_MMAP_PGPROT select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_CPU select NEED_DMA_MAP_STATE - select DMA_NONCOHERENT_MMAP + select ARCH_HAS_DMA_COHERENT_TO_PFN select DMA_NONCOHERENT_CACHE_SYNC config SYS_HAS_EARLY_PRINTK diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index bb49dfa1a9a3..0a0aaf39fd16 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -682,7 +682,6 @@ static int jazz_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) const struct dma_map_ops jazz_dma_ops = { .alloc = jazz_dma_alloc, .free = jazz_dma_free, - .mmap = arch_dma_mmap, .map_page = jazz_dma_map_page, .unmap_page = jazz_dma_unmap_page, .map_sg = jazz_dma_map_sg, diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index b01b9a3e424f..e6c9485cadcf 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -66,33 +66,19 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr) { - unsigned long user_count = vma_pages(vma); - unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long addr = CAC_ADDR((unsigned 
long)cpu_addr); - unsigned long off = vma->vm_pgoff; - unsigned long pfn = page_to_pfn(virt_to_page((void *)addr)); - int ret = -ENXIO; + return page_to_pfn(virt_to_page((void *)addr)); +} +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, + unsigned long attrs) +{ if (attrs & DMA_ATTR_WRITE_COMBINE) - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - else - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - if (off < count && user_count <= (count - off)) { - ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); - } - - return ret; + return pgprot_writecombine(prot); + return pgprot_noncached(prot); } static inline void dma_sync_virt(void *addr, size_t size, diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index a6f9ba85dc4b..470757ddddea 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -662,7 +662,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma, return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); #endif - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } /* diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 8f2001181cd1..c3378d4e0d57 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -444,7 +444,8 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, } extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); void *dma_common_contiguous_remap(struct page *page, size_t size, unsigned long vm_flags, @@ -476,7 +477,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, BUG_ON(!ops); if (ops->mmap) return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 3f503025a0cd..9051b055beec 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -24,9 +24,15 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); -int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, +long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, + dma_addr_t dma_addr); + +#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT +pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); +#else +# define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) +#endif #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 5617c9a76208..645c7a2ecde8 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig @@ -29,13 +29,15 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL bool -config DMA_DIRECT_OPS +config ARCH_HAS_DMA_COHERENT_TO_PFN bool - 
depends on HAS_DMA -config DMA_NONCOHERENT_MMAP +config ARCH_HAS_DMA_MMAP_PGPROT bool - depends on DMA_DIRECT_OPS + +config DMA_DIRECT_OPS + bool + depends on HAS_DMA config DMA_NONCOHERENT_CACHE_SYNC bool diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 09e85f6aa4ba..c954f0a6dc62 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -155,16 +155,6 @@ void dma_direct_free(struct device *dev, size_t size, dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs); } -static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - if (!dev_is_dma_coherent(dev) && - IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP)) - return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); -} - static void dma_direct_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { @@ -293,7 +283,6 @@ int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc, .free = dma_direct_free, - .mmap = dma_direct_mmap, .map_page = dma_direct_map_page, .map_sg = dma_direct_map_sg, #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 3540cb399bd2..42fd73aca305 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -7,7 +7,7 @@ */ #include -#include +#include #include #include #include @@ -220,27 +220,37 @@ EXPORT_SYMBOL(dma_common_get_sgtable); * Create userspace mapping for the DMA-coherent memory. */ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) { - int ret = -ENXIO; #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP unsigned long user_count = vma_pages(vma); unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long off = vma->vm_pgoff; + unsigned long pfn; + int ret = -ENXIO; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; - if (off < count && user_count <= (count - off)) - ret = remap_pfn_range(vma, vma->vm_start, - page_to_pfn(virt_to_page(cpu_addr)) + off, - user_count << PAGE_SHIFT, - vma->vm_page_prot); -#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ + if (off >= count || user_count > count - off) + return -ENXIO; + + if (!dev_is_dma_coherent(dev)) { + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN)) + return -ENXIO; + pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr); + } else { + pfn = page_to_pfn(virt_to_page(cpu_addr)); + } - return ret; + return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + user_count << PAGE_SHIFT, vma->vm_page_prot); +#else + return -ENXIO; +#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */ } EXPORT_SYMBOL(dma_common_mmap); -- cgit v1.2.3-59-g8ed1b From 9406a49fd1f4379409ed87b29fdaa259b0441912 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Aug 2018 09:39:38 +0200 Subject: dma-mapping: support non-coherent devices in dma_common_get_sgtable We can use the arch_dma_coherent_to_pfn hook to provide a ->get_sgtable implementation. 
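In sketch form (reassembled from the kernel/dma/mapping.c hunk below), the resulting helper resolves the struct page through the pfn hook before building the single-entry table:

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (!dev_is_dma_coherent(dev)) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
			return -ENXIO;	/* cannot resolve a struct page */
		page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
				dma_addr));
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}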
Note that this isn't an endorsement of this interface (which is a
horribly bad idea), but it is required to move arm64 over to the
generic code without a loss of functionality.

Signed-off-by: Christoph Hellwig
---
 drivers/xen/swiotlb-xen.c | 2 +-
 include/linux/dma-mapping.h | 7 ++++---
 kernel/dma/mapping.c | 23 ++++++++++++++++-------
 3 files changed, 21 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 470757ddddea..28819a0e61d0 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -689,7 +689,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 handle, size, attrs);
 }
 #endif
- return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
 }

 static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c3378d4e0d57..bd81e74cca7b 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -483,8 +483,8 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

 int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, unsigned long attrs);

 static inline int
 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
@@ -496,7 +496,8 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 if (ops->get_sgtable)
 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
 attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
 }

 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 42fd73aca305..58dec7a92b7b 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -202,17 +202,26 @@ EXPORT_SYMBOL(dmam_release_declared_memory);
 * Create scatter-list for the already allocated DMA buffer.
 */
 int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size)
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
 {
- struct page *page = virt_to_page(cpu_addr);
+ struct page *page;
 int ret;

- ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (unlikely(ret))
- return ret;
+ if (!dev_is_dma_coherent(dev)) {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
+ return -ENXIO;

- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- return 0;
+ page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
+ dma_addr));
+ } else {
+ page = virt_to_page(cpu_addr);
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (!ret)
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return ret;
 }
 EXPORT_SYMBOL(dma_common_get_sgtable);
-- cgit v1.2.3-59-g8ed1b

From 1a0afc14b5da329765d6ecd4a79f546b9363ad8c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 25 Sep 2018 13:16:55 -0700
Subject: Revert "dma-mapping: clear dev->dma_ops in arch_teardown_dma_ops"

This reverts commit 46053c73685411915d3de50c5a0045beef32806b.
This change breaks architectures that set up dma_ops in their own magic
way and do not use arch_setup_dma_ops, so revert it.

Reported-by: Guenter Roeck
Signed-off-by: Christoph Hellwig
---
 arch/arm64/include/asm/dma-mapping.h | 5 +++++
 arch/arm64/mm/dma-mapping.c | 5 +++++
 include/linux/dma-mapping.h | 5 +----
 3 files changed, 11 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 0a2d13332545..b7847eb8a7bb 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -39,6 +39,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 const struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops arch_setup_dma_ops

+#ifdef CONFIG_IOMMU_DMA
+void arch_teardown_dma_ops(struct device *dev);
+#define arch_teardown_dma_ops arch_teardown_dma_ops
+#endif
+
 /* do not use this function in a driver */
 static inline bool is_device_dma_coherent(struct device *dev)
 {

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index cdcb73db9ea2..072c51fb07d7 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -862,6 +862,11 @@ out_err:
 dev_name(dev));
 }

+void arch_teardown_dma_ops(struct device *dev)
+{
+ dev->dma_ops = NULL;
+}
+
 #else

 static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index bd81e74cca7b..d23fc45c8208 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -665,10 +665,7 @@ static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 #endif

 #ifndef arch_teardown_dma_ops
-static inline void arch_teardown_dma_ops(struct device *dev)
-{
- dev->dma_ops = NULL;
-}
+static inline void arch_teardown_dma_ops(struct device *dev) { }
 #endif

 static inline unsigned int dma_get_max_seg_size(struct device *dev)
-- cgit v1.2.3-59-g8ed1b

From c6d4381220a0087ce19dbf6984d92c451bd6b364 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 6 Sep 2018 19:27:24 -0400
Subject: dma-mapping: make the get_required_mask method available unconditionally

This saves some duplication for ia64, and makes the interface more
general. In the long run we want each dma_map_ops instance to fill this
out, but this will take a little more prep work.
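To illustrate how such a method is consumed, a driver can query the
required mask and only switch to a wider addressing mode when the
memory in the machine actually needs it. This is a hypothetical
probe-path sketch, not code from this series:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/*
 * Hypothetical example: pick a DMA mask based on what the platform
 * reports as required instead of unconditionally going 64-bit.
 */
static int sketch_setup_dma(struct pci_dev *pdev)
{
	if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;	/* use the 64-bit descriptor format */

	/* fall back to 32-bit addressing, bouncing where necessary */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}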
Signed-off-by: Christoph Hellwig
Acked-by: Benjamin Herrenschmidt
---
 arch/ia64/include/asm/dma-mapping.h | 2 --
 arch/ia64/include/asm/machvec.h | 7 -------
 arch/ia64/include/asm/machvec_init.h | 1 -
 arch/ia64/include/asm/machvec_sn2.h | 2 --
 arch/ia64/pci/pci.c | 26 --------------------------
 arch/ia64/sn/pci/pci_dma.c | 4 ++--
 drivers/base/platform.c | 13 +++++++++++--
 drivers/pci/controller/vmd.c | 4 ----
 include/linux/dma-mapping.h | 2 --
 9 files changed, 13 insertions(+), 48 deletions(-)

(limited to 'include')

diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 76e4d6632d68..522745ae67bb 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -10,8 +10,6 @@
 #include
 #include

-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 extern const struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);

diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 267f4f170191..5133739966bc 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -44,7 +44,6 @@ typedef void ia64_mv_kernel_launch_event_t(void);

 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);

 /*
@@ -127,7 +126,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_global_tlb_purge ia64_mv.global_tlb_purge
 # define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
 # define platform_dma_init ia64_mv.dma_init
-# define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
 # define platform_dma_get_ops ia64_mv.dma_get_ops
 # define platform_irq_to_vector ia64_mv.irq_to_vector
 # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
@@ -171,7 +169,6 @@ struct ia64_machine_vector {
 ia64_mv_global_tlb_purge_t *global_tlb_purge;
 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 ia64_mv_dma_init *dma_init;
- ia64_mv_dma_get_required_mask *dma_get_required_mask;
 ia64_mv_dma_get_ops *dma_get_ops;
 ia64_mv_irq_to_vector *irq_to_vector;
 ia64_mv_local_vector_to_irq *local_vector_to_irq;
@@ -211,7 +208,6 @@ struct ia64_machine_vector {
 platform_global_tlb_purge, \
 platform_tlb_migrate_finish, \
 platform_dma_init, \
- platform_dma_get_required_mask, \
 platform_dma_get_ops, \
 platform_irq_to_vector, \
 platform_local_vector_to_irq, \
@@ -286,9 +282,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
 #ifndef platform_dma_get_ops
 # define platform_dma_get_ops dma_get_ops
 #endif
-#ifndef platform_dma_get_required_mask
-# define platform_dma_get_required_mask ia64_dma_get_required_mask
-#endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector __ia64_irq_to_vector
 #endif

diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 2b32fd06b7c6..2aafb69a3787 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -4,7 +4,6 @@

 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
-extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
 extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;

diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index ece9fa85be88..b5153d300289 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -55,7 +55,6 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_get_required_mask sn_dma_get_required_mask;
 extern ia64_mv_dma_init sn_dma_init;
 extern ia64_mv_migrate_t sn_migrate;
 extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event;
@@ -100,7 +99,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
 #define platform_pci_legacy_read sn_pci_legacy_read
 #define platform_pci_legacy_write sn_pci_legacy_write
-#define platform_dma_get_required_mask sn_dma_get_required_mask
 #define platform_dma_init sn_dma_init
 #define platform_migrate sn_migrate
 #define platform_kernel_launch_event sn_kernel_launch_event

diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7ccc64d5fe3e..5d71800df431 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -568,32 +568,6 @@ static void __init set_pci_dfl_cacheline_size(void)
 pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }

-u64 ia64_dma_get_required_mask(struct device *dev)
-{
- u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
- u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
- u64 mask;
-
- if (!high_totalram) {
- /* convert to mask just covering totalram */
- low_totalram = (1 << (fls(low_totalram) - 1));
- low_totalram += low_totalram - 1;
- mask = low_totalram;
- } else {
- high_totalram = (1 << (fls(high_totalram) - 1));
- high_totalram += high_totalram - 1;
- mask = (((u64)high_totalram) << 32) + 0xffffffff;
- }
- return mask;
-}
-EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
-
-u64 dma_get_required_mask(struct device *dev)
-{
- return platform_dma_get_required_mask(dev);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-
 static int __init pcibios_init(void)
 {
 set_pci_dfl_cacheline_size();

diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 74c934a997bb..96eb2567718a 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -344,11 +344,10 @@ static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 return 0;
 }

-u64 sn_dma_get_required_mask(struct device *dev)
+static u64 sn_dma_get_required_mask(struct device *dev)
 {
 return DMA_BIT_MASK(64);
 }
-EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
@@ -473,6 +472,7 @@ static struct dma_map_ops sn_dma_ops = {
 .sync_sg_for_device = sn_dma_sync_sg_for_device,
 .mapping_error = sn_dma_mapping_error,
 .dma_supported = sn_dma_supported,
+ .get_required_mask = sn_dma_get_required_mask,
 };

 void sn_dma_init(void)

diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index dff82a3c2caa..cfe22fded980 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1179,8 +1179,7 @@ int __init platform_bus_init(void)
 return error;
 }

-#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
-u64 dma_get_required_mask(struct device *dev)
+static u64 dma_default_get_required_mask(struct device *dev)
 {
 u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
 u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
@@ -1198,6 +1197,16 @@ u64 dma_get_required_mask(struct device *dev)
 }
 return mask;
 }
+
+#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
+u64 dma_get_required_mask(struct device *dev)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->get_required_mask)
+ return ops->get_required_mask(dev);
+ return dma_default_get_required_mask(dev);
+}
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 #endif

diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index fd2dbd7eed7b..f31ed62d518c 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -404,12 +404,10 @@ static int vmd_dma_supported(struct device *dev, u64 mask)
 return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
 }

-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 static u64 vmd_get_required_mask(struct device *dev)
 {
 return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
 }
-#endif

 static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
 {
@@ -450,9 +448,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
 ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
 ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
 ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-#endif
 add_dma_domain(domain);
 }
 #undef ASSIGN_VMD_DMA_OPS

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d23fc45c8208..562af6b45f23 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -130,9 +130,7 @@ struct dma_map_ops {
 enum dma_data_direction direction);
 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 int (*dma_supported)(struct device *dev, u64 mask);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 u64 (*get_required_mask)(struct device *dev);
-#endif
 };

 extern const struct dma_map_ops dma_direct_ops;
-- cgit v1.2.3-59-g8ed1b

From a20bb058375147cb639c7aa17ef86ad68b32d847 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 20 Sep 2018 13:26:13 +0200
Subject: dma-direct: add an explicit dma_direct_get_required_mask

This is somewhat modelled after the powerpc version, and differs from
the legacy fallback in using fls64 instead of pointlessly splitting up
the address into low and high dwords, and in that it takes
(__)phys_to_dma into account.

Signed-off-by: Christoph Hellwig
Acked-by: Benjamin Herrenschmidt
Reviewed-by: Robin Murphy
---
 include/linux/dma-direct.h | 1 +
 kernel/dma/direct.c | 22 +++++++++++++++++++---
 2 files changed, 20 insertions(+), 3 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 86a59ba5a7f3..b79496d8c75b 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -55,6 +55,7 @@ static inline void dma_mark_clean(void *addr, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */

+u64 dma_direct_get_required_mask(struct device *dev);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index c954f0a6dc62..f32b33cfa331 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -4,6 +4,7 @@
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
+#include /* for max_pfn */
 #include
 #include
 #include
@@ -53,11 +54,25 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 return true;
 }

+static inline dma_addr_t phys_to_dma_direct(struct device *dev,
+ phys_addr_t phys)
+{
+ if (force_dma_unencrypted())
+ return __phys_to_dma(dev, phys);
+ return phys_to_dma(dev, phys);
+}
+
+u64 dma_direct_get_required_mask(struct device *dev)
+{
+ u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
+
+ return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
+}
+
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
- dma_addr_t addr = force_dma_unencrypted() ?
- __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
- return addr + size - 1 <= dev->coherent_dma_mask;
+ return phys_to_dma_direct(dev, phys) + size - 1 <=
+ dev->coherent_dma_mask;
 }

 void *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -296,6 +311,7 @@ const struct dma_map_ops dma_direct_ops = {
 .unmap_page = dma_direct_unmap_page,
 .unmap_sg = dma_direct_unmap_sg,
 #endif
+ .get_required_mask = dma_direct_get_required_mask,
 .dma_supported = dma_direct_supported,
 .mapping_error = dma_direct_mapping_error,
 .cache_sync = arch_dma_cache_sync,
-- cgit v1.2.3-59-g8ed1b

From b4ebe6063204da58e48600b810a97c29ae9e5d12 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 20 Sep 2018 14:04:08 +0200
Subject: dma-direct: implement complete bus_dma_mask handling

Instead of rejecting devices with a too small bus_dma_mask, we can
handle them by taking the bus dma_mask into account for allocations
and bounce buffering decisions.

Signed-off-by: Christoph Hellwig
---
 include/linux/dma-direct.h | 3 ++-
 kernel/dma/direct.c | 21 +++++++++++----------
 2 files changed, 13 insertions(+), 11 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index b79496d8c75b..fbca184ff5a0 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 if (!dev->dma_mask)
 return false;

- return addr + size - 1 <= *dev->dma_mask;
+ return addr + size - 1 <=
+ min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index e78548397a92..60c433b880e0 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 return false;
 }

- if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+ if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
 dev_err(dev,
- "%s: overflow %pad+%zu of device mask %llx\n",
- caller, &dma_addr, size, *dev->dma_mask);
+ "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+ caller, &dma_addr, size,
+ *dev->dma_mask, dev->bus_dma_mask);
 }
 return false;
 }
@@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

+ if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+ max_dma = dev->bus_dma_mask;
+
 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }

 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 u64 *phys_mask)
 {
+ if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+ dma_mask = dev->bus_dma_mask;
+
 if (force_dma_unencrypted())
 *phys_mask = __dma_to_phys(dev, dma_mask);
 else
@@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 return phys_to_dma_direct(dev, phys) + size - 1 <=
- dev->coherent_dma_mask;
+ min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 }

 void *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
 return 0;
 #endif
- /*
- * Upstream PCI/PCIe bridges or SoC interconnects may not carry
- * as many DMA address bits as the device itself supports.
- */
- if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
- return 0;
 return 1;
 }
-- cgit v1.2.3-59-g8ed1b

From 99c65fa7c59ff558e70db8aa61bbdece5d3a9588 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Mon, 8 Oct 2018 00:20:07 -0700
Subject: dma-debug: Check for drivers mapping invalid addresses in dma_map_single()

I recently debugged a DMA mapping oops where a driver was trying to map
a buffer returned from request_firmware() with dma_map_single(). Memory
returned from request_firmware() is mapped into the vmalloc region and
this isn't a valid region to map with dma_map_single() per the DMA
documentation's "What memory is DMA'able?" section.

Unfortunately, we don't really check that in the DMA debugging code, so
enabling DMA debugging doesn't help catch this problem. Let's add a new
DMA debug function to check for a vmalloc address or an invalid virtual
address and print a warning if this happens. This makes it a little
easier to debug these sorts of problems, instead of seeing odd behavior
or crashes when drivers attempt to map the vmalloc space for DMA.

Cc: Marek Szyprowski
Reviewed-by: Robin Murphy
Signed-off-by: Stephen Boyd
Signed-off-by: Christoph Hellwig
---
 include/linux/dma-debug.h | 8 ++++++++
 include/linux/dma-mapping.h | 1 +
 kernel/dma/debug.c | 16 ++++++++++++++++
 3 files changed, 25 insertions(+)

(limited to 'include')

diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index a785f2507159..30213adbb6b9 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -32,6 +32,9 @@ extern void dma_debug_add_bus(struct bus_type *bus);

 extern int dma_debug_resize_entries(u32 num_entries);

+extern void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len);
+
 extern void debug_dma_map_page(struct device *dev, struct page *page,
 size_t offset, size_t size,
 int direction, dma_addr_t dma_addr,
@@ -103,6 +106,11 @@ static inline int dma_debug_resize_entries(u32 num_entries)
 return 0;
 }

+static inline void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
+{
+}
+
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
 size_t offset, size_t size,
 int direction, dma_addr_t dma_addr,

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 562af6b45f23..547a48bcfa3d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -229,6 +229,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 dma_addr_t addr;

 BUG_ON(!valid_dma_direction(dir));
+ debug_dma_map_single(dev, ptr, size);
 addr = ops->map_page(dev, virt_to_page(ptr),
 offset_in_page(ptr), size,
 dir, attrs);

diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index c007d25bee09..231ca4628062 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1312,6 +1312,22 @@ static void check_sg_segment(struct device *dev, struct scatterlist *sg)
 #endif
 }

+void debug_dma_map_single(struct device *dev, const void *addr,
+ unsigned long len)
+{
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ if (!virt_addr_valid(addr))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
+ addr, len);
+
+ if (is_vmalloc_addr(addr))
+ err_printk(dev, NULL, "DMA-API: device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
+ addr, len);
+}
+EXPORT_SYMBOL(debug_dma_map_single);
+
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 size_t size, int direction, dma_addr_t dma_addr,
 bool map_single)
-- cgit v1.2.3-59-g8ed1b

From 7ed1d91a9ed61afe438ba51cbf49cb567ab7dca8 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 24 Sep 2018 13:06:58 +0200
Subject: dma-mapping: translate __GFP_NOWARN to DMA_ATTR_NO_WARN

This allows all dma_map_ops instances to entirely rely on
DMA_ATTR_NO_WARN going forward.

Signed-off-by: Christoph Hellwig
Acked-by: Robin Murphy
---
 include/linux/dma-mapping.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 547a48bcfa3d..15bd41447025 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -558,9 +558,11 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 }

 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t gfp)
 {
- return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
+
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
 }

 static inline void dma_free_coherent(struct device *dev, size_t size,
@@ -794,8 +796,12 @@ static inline void dmam_release_declared_memory(struct device *dev)
 static inline void *dma_alloc_wc(struct device *dev, size_t size,
 dma_addr_t *dma_addr, gfp_t gfp)
 {
- return dma_alloc_attrs(dev, size, dma_addr, gfp,
- DMA_ATTR_WRITE_COMBINE);
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
+
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
 }
 #ifndef dma_alloc_writecombine
 #define dma_alloc_writecombine dma_alloc_wc
-- cgit v1.2.3-59-g8ed1b
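With the patch above, a caller only has to pass __GFP_NOWARN to silence
allocation-failure warnings; wiring up DMA_ATTR_NO_WARN by hand is no
longer needed. A minimal sketch, with a made-up helper name:

#include <linux/dma-mapping.h>

/*
 * __GFP_NOWARN is now translated to DMA_ATTR_NO_WARN inside
 * dma_alloc_coherent(), so a failed attempt warns neither in the page
 * allocator nor in the dma_map_ops implementation.
 */
static void *sketch_try_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  GFP_KERNEL | __GFP_NOWARN);
}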