From ed59dfd9509d172e4920994ed9cbebf93b0050cc Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Mon, 23 May 2022 19:31:25 +0800
Subject: asm-generic: Add memory barrier dma_mb()

The memory barrier dma_mb() is introduced by commit a76a37777f2c
("iommu/arm-smmu-v3: Ensure queue is read after updating prod pointer"),
which is used to ensure that prior (both reads and writes) accesses to
memory by a CPU are ordered w.r.t. a subsequent MMIO write.

Reviewed-by: Arnd Bergmann # for asm-generic
Signed-off-by: Kefeng Wang
Reviewed-by: Marco Elver
Link: https://lore.kernel.org/r/20220523113126.171714-2-wangkefeng.wang@huawei.com
Signed-off-by: Will Deacon
---
 include/asm-generic/barrier.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index fd7e8fbaeef1..961f4d88f9ef 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -38,6 +38,10 @@
 #define wmb() do { kcsan_wmb(); __wmb(); } while (0)
 #endif
 
+#ifdef __dma_mb
+#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
 #ifdef __dma_rmb
 #define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
 #endif
@@ -65,6 +69,10 @@
 #define wmb() mb()
 #endif
 
+#ifndef dma_mb
+#define dma_mb() mb()
+#endif
+
 #ifndef dma_rmb
 #define dma_rmb() rmb()
 #endif
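The typical user is a driver that consumes descriptors from a ring in
DMA-coherent memory and then tells the device, via an MMIO write, that the
slots may be reused: the prior reads of the ring must complete before the
device sees the MMIO write, and dma_wmb(), which only orders writes, is not
enough. Below is a minimal sketch of that pattern; the ring layout, index
handling and register name are hypothetical, not taken from this patch or
from any real driver.

#include <linux/io.h>
#include <linux/types.h>

struct dma_ring {
	u32 entries[64];		/* memory shared with the device */
};

static u32 consume_and_ack(struct dma_ring *ring, u32 idx,
			   void __iomem *cons_reg)
{
	/* Read the entry the device produced for us */
	u32 entry = READ_ONCE(ring->entries[idx & 63]);

	/*
	 * Order the read above (and any earlier writes) before the MMIO
	 * write that allows the device to overwrite the slot. dma_wmb()
	 * would only order the writes.
	 */
	dma_mb();
	writel_relaxed(idx + 1, cons_reg);

	return entry;
}

This mirrors the motivation in the cited SMMUv3 commit, where the CPU's
reads of a queue have to complete before a relaxed MMIO write publishes
the updated pointer to the hardware.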
From abc5992b9dd0ba599f7a91d5a0f4569a78ae88ac Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Tue, 7 Jun 2022 20:50:23 +0800
Subject: mm: ioremap: Use more sensible name in ioremap_prot()

Use the more meaningful and sensible name phys_addr instead of addr in
ioremap_prot().

Suggested-by: Andrew Morton
Reviewed-by: Anshuman Khandual
Signed-off-by: Kefeng Wang
Reviewed-by: Baoquan He
Reviewed-by: Christoph Hellwig
Link: https://lore.kernel.org/r/20220610092255.32445-1-wangkefeng.wang@huawei.com
Signed-off-by: Will Deacon
---
 include/asm-generic/io.h |  3 ++-
 mm/ioremap.c             | 14 ++++++++------
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 7ce93aaf69f8..b76379628a02 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -964,7 +964,8 @@ static inline void iounmap(volatile void __iomem *addr)
 #elif defined(CONFIG_GENERIC_IOREMAP)
 #include
 
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   unsigned long prot);
 void iounmap(volatile void __iomem *addr);
 
 static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
diff --git a/mm/ioremap.c b/mm/ioremap.c
index 5fe598ecd9b7..2d754b48d230 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -11,20 +11,21 @@
 #include
 #include
 
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   unsigned long prot)
 {
 	unsigned long offset, vaddr;
 	phys_addr_t last_addr;
 	struct vm_struct *area;
 
 	/* Disallow wrap-around or zero size */
-	last_addr = addr + size - 1;
-	if (!size || last_addr < addr)
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
 		return NULL;
 
 	/* Page-align mappings */
-	offset = addr & (~PAGE_MASK);
-	addr -= offset;
+	offset = phys_addr & (~PAGE_MASK);
+	phys_addr -= offset;
 	size = PAGE_ALIGN(size + offset);
 
 	area = get_vm_area_caller(size, VM_IOREMAP,
@@ -33,7 +34,8 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
 		return NULL;
 	vaddr = (unsigned long)area->addr;
 
-	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+			       __pgprot(prot))) {
 		free_vm_area(area);
 		return NULL;
 	}

From 18e780b4e6ab89a3a10f46d151971863562c1c91 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Tue, 7 Jun 2022 20:50:25 +0800
Subject: mm: ioremap: Add ioremap/iounmap_allowed()

Add special hooks that let an architecture verify addr, size or prot
when doing ioremap() or iounmap(), which makes the generic ioremap
more useful.

ioremap_allowed() returns a bool:
  - true means continue to remap
  - false means skip remap and return directly

iounmap_allowed() returns a bool:
  - true means continue to vunmap
  - false means skip vunmap and return directly

Meanwhile, only vunmap the address when it is in the vmalloc area, as
the generic ioremap only returns vmalloc addresses.

Acked-by: Andrew Morton
Signed-off-by: Kefeng Wang
Reviewed-by: Christoph Hellwig
Reviewed-by: Baoquan He
Link: https://lore.kernel.org/r/20220607125027.44946-5-wangkefeng.wang@huawei.com
Signed-off-by: Will Deacon
---
 include/asm-generic/io.h | 26 ++++++++++++++++++++++++++
 mm/ioremap.c             | 11 ++++++++++-
 2 files changed, 36 insertions(+), 1 deletion(-)

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index b76379628a02..db5b890eaff7 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -964,6 +964,32 @@ static inline void iounmap(volatile void __iomem *addr)
 #elif defined(CONFIG_GENERIC_IOREMAP)
 #include
 
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP
+ * ioremap_allowed() return a bool,
+ *	- true means continue to remap
+ *	- false means skip remap and return directly
+ * iounmap_allowed() return a bool,
+ *	- true means continue to vunmap
+ *	- false means skip vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+				   unsigned long prot)
+{
+	return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+	return true;
+}
+#endif
+
 void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 			   unsigned long prot);
 void iounmap(volatile void __iomem *addr);
diff --git a/mm/ioremap.c b/mm/ioremap.c
index e1d008e8f87f..8652426282cc 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -28,6 +28,9 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
 	phys_addr -= offset;
 	size = PAGE_ALIGN(size + offset);
 
+	if (!ioremap_allowed(phys_addr, size, prot))
+		return NULL;
+
 	area = get_vm_area_caller(size, VM_IOREMAP,
 				  __builtin_return_address(0));
 	if (!area)
@@ -47,6 +50,12 @@ EXPORT_SYMBOL(ioremap_prot);
 
 void iounmap(volatile void __iomem *addr)
 {
-	vunmap((void *)((unsigned long)addr & PAGE_MASK));
+	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
+
+	if (!iounmap_allowed(vaddr))
+		return;
+
+	if (is_vmalloc_addr(vaddr))
+		vunmap(vaddr);
 }
 EXPORT_SYMBOL(iounmap);
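For a sense of how an architecture wires up these hooks, here is a sketch
modelled on the arm64 GENERIC_IOREMAP conversion from the same patch series.
The helpers PHYS_MASK, pfn_is_map_memory() and __phys_to_pfn() are
arm64-specific; another architecture would substitute its own checks. The
hook is defined in the arch's asm/io.h before asm-generic/io.h is included,
so the #ifndef fallback above is compiled out:

/* arch/<arch>/include/asm/io.h -- illustrative sketch */
#define ioremap_allowed ioremap_allowed
static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
				   unsigned long prot)
{
	unsigned long last_addr = phys_addr + size - 1;

	/* Reject physical addresses beyond the supported range */
	if (last_addr & ~PHYS_MASK)
		return false;

	/* Reject attempts to ioremap() normal RAM */
	if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
		return false;

	return true;
}

With the check hoisted into the hook, ioremap_prot() bails out with NULL
before any vmalloc area is reserved, and the architecture no longer needs a
private ioremap() implementation just to enforce such restrictions.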