From 7e04cc918954f9090952e8d17cb2c3c4a5ad055e Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 10 May 2021 17:52:06 +0530 Subject: arm64/mm: Validate CONFIG_PGTABLE_LEVELS CONFIG_PGTABLE_LEVELS has been statically defined in arch/arm64/Kconfig depending on the page size and requested virtual address range. To validate this page table level selection, this adds a BUILD_BUG_ON() based on the existing formula ARM64_HW_PGTABLE_LEVELS(). This helps protect against any inadvertent change to the CONFIG_PGTABLE_LEVELS selection. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/1620649326-24115-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/init.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index e55409caaee3..6e1ca044ca90 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -499,6 +499,13 @@ void __init mem_init(void) BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64); #endif + /* + * Selected page table levels should match when derived from + * scratch using the virtual address range and page size. + */ + BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) != + CONFIG_PGTABLE_LEVELS); + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { extern int sysctl_overcommit_memory; /* -- cgit v1.2.3-59-g8ed1b From 40221c737608cf324870c58ef063159c3a6a4c81 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 24 May 2021 13:10:30 +0530 Subject: arm64/mm: Make vmemmap_free() available only with CONFIG_MEMORY_HOTPLUG vmemmap_free() callsites (mm/sparse.c) and declaration (include/linux/mm.h) are protected with CONFIG_MEMORY_HOTPLUG. This function is not required if CONFIG_MEMORY_HOTPLUG is not enabled. Hence move the config wrapper outside the function definition. Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Link: https://lore.kernel.org/r/1621842030-23256-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/mmu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6dd9369e3ea0..3d34cd127f6b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1166,16 +1166,17 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, return 0; } #endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */ + +#ifdef CONFIG_MEMORY_HOTPLUG void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { -#ifdef CONFIG_MEMORY_HOTPLUG WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); unmap_hotplug_range(start, end, true, altmap); free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); -#endif } +#endif /* CONFIG_MEMORY_HOTPLUG */ static inline pud_t *fixmap_pud(unsigned long addr) { -- cgit v1.2.3-59-g8ed1b From 116b7f559492b719ae4bd22ee773cb7fb046a736 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:47 +0100 Subject: arm64: Do not enable uaccess for flush_icache_range __flush_icache_range works on kernel addresses, and doesn't need uaccess. The existing uaccess handling is a side-effect of the current implementation, which falls through into __flush_cache_user_range.
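For orientation, the kernel-only vs. user-capable split that this patch establishes can be pictured in plain C. This is only a sketch: cache_body(), flush_kernel_range(), flush_user_range() and the uaccess stubs below are invented stand-in names, not kernel symbols; the real routines are assembly in arch/arm64/mm/cache.S.

	#include <stdio.h>

	/* Pretend switch: set nonzero to simulate a fault on a user access. */
	static int fault_injected;

	static void uaccess_enable(void)  { /* stands in for uaccess_ttbr0_enable */ }
	static void uaccess_disable(void) { /* stands in for uaccess_ttbr0_disable */ }

	/* Shared body: returns nonzero iff the fault fixup path ran. */
	static int cache_body(unsigned long start, unsigned long end, int may_fault)
	{
		(void)start;
		(void)end;
		return may_fault && fault_injected;
	}

	/* Kernel addresses cannot fault: no fixup, no uaccess toggling. */
	static void flush_kernel_range(unsigned long start, unsigned long end)
	{
		cache_body(start, end, 0);
	}

	/* User addresses may fault: the fixup turns a fault into -EFAULT. */
	static long flush_user_range(unsigned long start, unsigned long end)
	{
		long ret = 0;

		uaccess_enable();
		if (cache_body(start, end, 1))
			ret = -14; /* -EFAULT */
		uaccess_disable();
		return ret;
	}

	int main(void)
	{
		flush_kernel_range(0x1000, 0x2000);
		fault_injected = 1;
		printf("user flush: %ld\n", flush_user_range(0x1000, 0x2000));
		return 0;
	}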
Instead of fallthrough to share the code, use a common macro for the two where the caller specifies an optional fixup label if user access is needed. If provided, this label would be used to generate an extable entry. Simplify the code to use dcache_by_line_op, instead of replicating much of its functionality. No functional change intended. Possible performance impact due to the reduced number of instructions. Reported-by: Catalin Marinas Reported-by: Will Deacon Reported-by: Mark Rutland Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/ Link: https://lore.kernel.org/linux-arm-kernel/20210521121846.GB1040@C02TD0UTHF1T.local/ Signed-off-by: Fuad Tabba Acked-by: Mark Rutland Acked-by: Catalin Marinas Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-5-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/mm/cache.S | 57 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 23 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 2d881f34dd9d..7c54bcbf5a36 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -14,6 +14,34 @@ #include #include +/* + * __flush_cache_range(start,end) [fixup] + * + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. + * + * - start - virtual start address of region + * - end - virtual end address of region + * - fixup - optional label to branch to on user fault + */ +.macro __flush_cache_range, fixup +alternative_if ARM64_HAS_CACHE_IDC + dsb ishst + b .Ldc_skip_\@ +alternative_else_nop_endif + mov x2, x0 + sub x3, x1, x0 + dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup +.Ldc_skip_\@: +alternative_if ARM64_HAS_CACHE_DIC + isb + b .Lic_skip_\@ +alternative_else_nop_endif + invalidate_icache_by_line x0, x1, x2, x3, \fixup +.Lic_skip_\@: +.endm + /* * flush_icache_range(start,end) * @@ -25,7 +53,9 @@ * - end - virtual end address of region */ SYM_FUNC_START(__flush_icache_range) - /* FALLTHROUGH */ + __flush_cache_range + ret +SYM_FUNC_END(__flush_icache_range) /* * __flush_cache_user_range(start,end) @@ -39,34 +69,15 @@ SYM_FUNC_START(__flush_icache_range) */ SYM_FUNC_START(__flush_cache_user_range) uaccess_ttbr0_enable x2, x3, x4 -alternative_if ARM64_HAS_CACHE_IDC - dsb ishst - b 7f -alternative_else_nop_endif - dcache_line_size x2, x3 - sub x3, x2, #1 - bic x4, x0, x3 -1: -user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE - add x4, x4, x2 - cmp x4, x1 - b.lo 1b - dsb ish -7: -alternative_if ARM64_HAS_CACHE_DIC - isb - b 8f -alternative_else_nop_endif - invalidate_icache_by_line x0, x1, x2, x3, 9f -8: mov x0, #0 + __flush_cache_range 2f + mov x0, xzr 1: uaccess_ttbr0_disable x1, x2 ret -9: +2: mov x0, #-EFAULT b 1b -SYM_FUNC_END(__flush_icache_range) SYM_FUNC_END(__flush_cache_user_range) /* -- cgit v1.2.3-59-g8ed1b From 7908072da535dca52b3a011ed6e1f73534546b59 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:48 +0100 Subject: arm64: Do not enable uaccess for invalidate_icache_range invalidate_icache_range() works on kernel addresses, and doesn't need uaccess. Remove the code that toggles uaccess_ttbr0_enable, as well as the code that emits an entry into the exception table (via the macro invalidate_icache_by_line). 
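For background, an exception table entry pairs the address of an instruction that is allowed to fault with a fixup address at which execution resumes after the fault. Below is a toy version of the lookup with invented addresses; the real arm64 table stores relative offsets emitted by _asm_extable rather than absolute pointers, so this is an illustration of the idea only.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct extable_entry {
		uintptr_t insn;		/* address that is allowed to fault */
		uintptr_t fixup;	/* where execution resumes on fault */
	};

	static const struct extable_entry extable[] = {
		{ 0x1000, 0x2000 },	/* invented addresses */
		{ 0x1010, 0x2040 },
	};

	static uintptr_t search_extable(uintptr_t fault)
	{
		size_t i;

		for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
			if (extable[i].insn == fault)
				return extable[i].fixup;
		return 0;	/* no entry: a fault here is a kernel bug */
	}

	int main(void)
	{
		printf("fixup for 0x1000: %#lx\n", (unsigned long)search_extable(0x1000));
		printf("fixup for 0x1008: %#lx\n", (unsigned long)search_extable(0x1008));
		return 0;
	}

With kernel-only addresses there is no legitimate fault to fix up, which is why the entry can be dropped.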
Changes return type of invalidate_icache_range() from int (which used to indicate a fault) to void, since it doesn't need uaccess and won't fault. Note that return value was never checked by any of the callers. No functional change intended. Possible performance impact due to the reduced number of instructions. Reported-by: Catalin Marinas Reported-by: Will Deacon Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/ Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Acked-by: Catalin Marinas Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-6-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/cacheflush.h | 2 +- arch/arm64/mm/cache.S | 11 +---------- 2 files changed, 2 insertions(+), 11 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 52e5c1623224..a586afa84172 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -57,7 +57,7 @@ * - size - region size */ extern void __flush_icache_range(unsigned long start, unsigned long end); -extern int invalidate_icache_range(unsigned long start, unsigned long end); +extern void invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); extern void __inval_dcache_area(void *addr, size_t len); extern void __clean_dcache_area_poc(void *addr, size_t len); diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 7c54bcbf5a36..14eac9d76d57 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -90,21 +90,12 @@ SYM_FUNC_END(__flush_cache_user_range) */ SYM_FUNC_START(invalidate_icache_range) alternative_if ARM64_HAS_CACHE_DIC - mov x0, xzr isb ret alternative_else_nop_endif - uaccess_ttbr0_enable x2, x3, x4 - - invalidate_icache_by_line x0, x1, x2, x3, 2f - mov x0, xzr -1: - uaccess_ttbr0_disable x1, x2 + invalidate_icache_by_line x0, x1, x2, x3 ret -2: - mov x0, #-EFAULT - b 1b SYM_FUNC_END(invalidate_icache_range) /* -- cgit v1.2.3-59-g8ed1b From d044f8141847bee542998a6fd8de2c270fe40e48 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:52 +0100 Subject: arm64: Fix comments to refer to correct function __flush_icache_range Many comments refer to the function flush_icache_range, where the intent is in fact __flush_icache_range. Fix these comments to refer to the intended function. That's probably due to commit 3b8c9f1cdfc506e9 ("arm64: IPI each CPU after invalidating the I-cache for kernel mappings"), which renamed flush_icache_range() to __flush_icache_range() and added a wrapper. No functional change intended. Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-10-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/kernel/hibernate-asm.S | 4 ++-- arch/arm64/mm/cache.S | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 0ed2f72a6b94..ef2ab7caf815 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -45,7 +45,7 @@ * Because this code has to be copied to a 'safe' page, it can't call out to * other functions by PC-relative address. Also remember that it may be * mid-way through over-writing other functions. 
For this reason it contains - * code from flush_icache_range() and uses the copy_page() macro. + * code from __flush_icache_range() and uses the copy_page() macro. * * This 'safe' page is mapped via ttbr0, and executed from there. This function * switches to a copy of the linear map in ttbr1, performs the restore, then @@ -87,7 +87,7 @@ SYM_CODE_START(swsusp_arch_suspend_exit) copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 add x1, x10, #PAGE_SIZE - /* Clean the copied page to PoU - based on flush_icache_range() */ + /* Clean the copied page to PoU - based on __flush_icache_range() */ raw_dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x10, x3 diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 14eac9d76d57..910ae8f6a389 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -43,7 +43,7 @@ alternative_else_nop_endif .endm /* - * flush_icache_range(start,end) + * __flush_icache_range(start,end) * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, -- cgit v1.2.3-59-g8ed1b From e3974adb4ef591e898956083a3dfa6336bb88638 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:53 +0100 Subject: arm64: __inval_dcache_area to take end parameter instead of size To be consistent with other functions with similar names and functionality in cacheflush.h, cache.S, and cachetlb.rst, change to specify the range in terms of start and end, as opposed to start and size. Because the code is shared with __dma_inv_area, it changes the parameters for that as well. However, __dma_inv_area is local to cache.S, so no other users are affected. No functional change intended. Reported-by: Will Deacon Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-11-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/cacheflush.h | 2 +- arch/arm64/kernel/head.S | 5 +---- arch/arm64/mm/cache.S | 16 +++++++++------- arch/arm64/mm/flush.c | 2 +- 4 files changed, 12 insertions(+), 13 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index a586afa84172..157234706817 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -59,7 +59,7 @@ extern void __flush_icache_range(unsigned long start, unsigned long end); extern void invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); -extern void __inval_dcache_area(void *addr, size_t len); +extern void __inval_dcache_area(unsigned long start, unsigned long end); extern void __clean_dcache_area_poc(void *addr, size_t len); extern void __clean_dcache_area_pop(void *addr, size_t len); extern void __clean_dcache_area_pou(void *addr, size_t len); diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 96873dfa67fd..8df0ac8d9123 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -117,7 +117,7 @@ SYM_CODE_START_LOCAL(preserve_boot_args) dmb sy // needed before dc ivac with // MMU off - mov x1, #0x20 // 4 x 8 bytes + add x1, x0, #0x20 // 4 x 8 bytes b __inval_dcache_area // tail call SYM_CODE_END(preserve_boot_args) @@ -268,7 +268,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables) */ adrp x0, init_pg_dir adrp x1, init_pg_end - sub x1, x1, x0 bl __inval_dcache_area /* @@ -382,12 +381,10 @@ SYM_FUNC_START_LOCAL(__create_page_tables) adrp x0, idmap_pg_dir adrp x1, 
idmap_pg_end - sub x1, x1, x0 bl __inval_dcache_area adrp x0, init_pg_dir adrp x1, init_pg_end - sub x1, x1, x0 bl __inval_dcache_area ret x28 diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 910ae8f6a389..03c1a7659ffb 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -131,25 +131,24 @@ alternative_else_nop_endif SYM_FUNC_END(__clean_dcache_area_pou) /* - * __inval_dcache_area(kaddr, size) + * __inval_dcache_area(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are invalidated. Any partial lines at the ends of the interval are * also cleaned to PoC to prevent data loss. * - * - kaddr - kernel address - * - size - size in question + * - start - kernel start address of region + * - end - kernel end address of region */ SYM_FUNC_START_LOCAL(__dma_inv_area) SYM_FUNC_START_PI(__inval_dcache_area) /* FALLTHROUGH */ /* - * __dma_inv_area(start, size) + * __dma_inv_area(start, end) * - start - virtual start address of region - * - size - size in question + * - end - virtual end address of region */ - add x1, x1, x0 dcache_line_size x2, x3 sub x3, x2, #1 tst x1, x3 // end cache line aligned? @@ -230,8 +229,10 @@ SYM_FUNC_END_PI(__dma_flush_area) * - dir - DMA direction */ SYM_FUNC_START_PI(__dma_map_area) + add x1, x0, x1 cmp w2, #DMA_FROM_DEVICE b.eq __dma_inv_area + sub x1, x1, x0 b __dma_clean_area SYM_FUNC_END_PI(__dma_map_area) @@ -242,6 +243,7 @@ SYM_FUNC_END_PI(__dma_map_area) * - dir - DMA direction */ SYM_FUNC_START_PI(__dma_unmap_area) + add x1, x0, x1 cmp w2, #DMA_TO_DEVICE b.ne __dma_inv_area ret diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 6d44c028d1c9..be650b573b2a 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); void arch_invalidate_pmem(void *addr, size_t size) { - __inval_dcache_area(addr, size); + __inval_dcache_area((unsigned long)addr, (unsigned long)addr + size); } EXPORT_SYMBOL_GPL(arch_invalidate_pmem); #endif -- cgit v1.2.3-59-g8ed1b From 163d3f80695e31068c7d32244c9e6d406d5c5c00 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:54 +0100 Subject: arm64: dcache_by_line_op to take end parameter instead of size To be consistent with other functions with similar names and functionality in cacheflush.h, cache.S, and cachetlb.rst, change to specify the range in terms of start and end, as opposed to start and size. No functional change intended. 
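The loop the macro expands to now walks [start, end) directly. In C it is roughly the following sketch; the line size is hardcoded at 64 bytes here purely as an assumption, where the real macro reads it from CTR_EL0 via dcache_line_size:

	#include <stdio.h>

	#define LINE_SIZE 64UL	/* assumption for the sketch only */

	static void dc_op(unsigned long addr)
	{
		printf("dc <op>, %#lx\n", addr);
	}

	static void dcache_by_line(unsigned long start, unsigned long end)
	{
		start &= ~(LINE_SIZE - 1);	/* bic \start, \start, \tmp2 */
		do {
			dc_op(start);		/* dc \op, \start            */
			start += LINE_SIZE;	/* add \start, \start, \tmp1 */
		} while (start < end);		/* cmp + b.lo .Ldcache_op\@  */
		/* the real macro then finishes with dsb \domain */
	}

	int main(void)
	{
		/* Touches the four lines at 0x1000, 0x1040, 0x1080, 0x10c0. */
		dcache_by_line(0x1030, 0x10d0);
		return 0;
	}

Passing end rather than size is also what removes the "add \size, \addr, \size" from the macro, so callers that already hold an end address no longer pay for a subtraction followed by a re-addition.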
Reported-by: Will Deacon Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-12-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 27 +++++++++++++-------------- arch/arm64/kvm/hyp/nvhe/cache.S | 1 + arch/arm64/mm/cache.S | 7 ++++++- 3 files changed, 20 insertions(+), 15 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index ced791124b28..c4cecf85dccf 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -397,40 +397,39 @@ alternative_endif /* * Macro to perform a data cache maintenance for the interval - * [addr, addr + size) + * [start, end) * * op: operation passed to dc instruction * domain: domain used in dsb instruciton - * addr: starting virtual address of the region - * size: size of the region + * start: starting virtual address of the region + * end: end virtual address of the region * fixup: optional label to branch to on user fault - * Corrupts: addr, size, tmp1, tmp2 + * Corrupts: start, end, tmp1, tmp2 */ - .macro dcache_by_line_op op, domain, addr, size, tmp1, tmp2, fixup + .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup dcache_line_size \tmp1, \tmp2 - add \size, \addr, \size sub \tmp2, \tmp1, #1 - bic \addr, \addr, \tmp2 + bic \start, \start, \tmp2 .Ldcache_op\@: .ifc \op, cvau - __dcache_op_workaround_clean_cache \op, \addr + __dcache_op_workaround_clean_cache \op, \start .else .ifc \op, cvac - __dcache_op_workaround_clean_cache \op, \addr + __dcache_op_workaround_clean_cache \op, \start .else .ifc \op, cvap - sys 3, c7, c12, 1, \addr // dc cvap + sys 3, c7, c12, 1, \start // dc cvap .else .ifc \op, cvadp - sys 3, c7, c13, 1, \addr // dc cvadp + sys 3, c7, c13, 1, \start // dc cvadp .else - dc \op, \addr + dc \op, \start .endif .endif .endif .endif - add \addr, \addr, \tmp1 - cmp \addr, \size + add \start, \start, \tmp1 + cmp \start, \end b.lo .Ldcache_op\@ dsb \domain diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S index 36cef6915428..3bcfa3cac46f 100644 --- a/arch/arm64/kvm/hyp/nvhe/cache.S +++ b/arch/arm64/kvm/hyp/nvhe/cache.S @@ -8,6 +8,7 @@ #include SYM_FUNC_START_PI(__flush_dcache_area) + add x1, x0, x1 dcache_by_line_op civac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__flush_dcache_area) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 03c1a7659ffb..fff883f691f2 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -31,7 +31,7 @@ alternative_if ARM64_HAS_CACHE_IDC b .Ldc_skip_\@ alternative_else_nop_endif mov x2, x0 - sub x3, x1, x0 + mov x3, x1 dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup .Ldc_skip_\@: alternative_if ARM64_HAS_CACHE_DIC @@ -108,6 +108,7 @@ SYM_FUNC_END(invalidate_icache_range) * - size - size in question */ SYM_FUNC_START_PI(__flush_dcache_area) + add x1, x0, x1 dcache_by_line_op civac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__flush_dcache_area) @@ -126,6 +127,7 @@ alternative_if ARM64_HAS_CACHE_IDC dsb ishst ret alternative_else_nop_endif + add x1, x0, x1 dcache_by_line_op cvau, ish, x0, x1, x2, x3 ret SYM_FUNC_END(__clean_dcache_area_pou) @@ -187,6 +189,7 @@ SYM_FUNC_START_PI(__clean_dcache_area_poc) * - start - virtual start address of region * - size - size in question */ + add x1, x0, x1 dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__clean_dcache_area_poc) @@ -205,6 +208,7 @@ SYM_FUNC_START_PI(__clean_dcache_area_pop) 
alternative_if_not ARM64_HAS_DCPOP b __clean_dcache_area_poc alternative_else_nop_endif + add x1, x0, x1 dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__clean_dcache_area_pop) @@ -218,6 +222,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop) * - size - size in question */ SYM_FUNC_START_PI(__dma_flush_area) + add x1, x0, x1 dcache_by_line_op civac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__dma_flush_area) -- cgit v1.2.3-59-g8ed1b From 814b186079cd54d3fe3b6b8ab539cbd44705ef9d Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:55 +0100 Subject: arm64: __flush_dcache_area to take end parameter instead of size To be consistent with other functions with similar names and functionality in cacheflush.h, cache.S, and cachetlb.rst, change to specify the range in terms of start and end, as opposed to start and size. No functional change intended. Reported-by: Will Deacon Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-13-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/arch_gicv3.h | 3 ++- arch/arm64/include/asm/cacheflush.h | 8 ++++---- arch/arm64/include/asm/efi.h | 2 +- arch/arm64/include/asm/kvm_mmu.h | 3 ++- arch/arm64/kernel/hibernate.c | 18 +++++++++++------- arch/arm64/kernel/idreg-override.c | 3 ++- arch/arm64/kernel/kaslr.c | 12 +++++++++--- arch/arm64/kernel/machine_kexec.c | 20 +++++++++++++------- arch/arm64/kernel/smp.c | 8 ++++++-- arch/arm64/kernel/smp_spin_table.c | 7 ++++--- arch/arm64/kvm/hyp/nvhe/cache.S | 1 - arch/arm64/kvm/hyp/nvhe/setup.c | 3 ++- arch/arm64/kvm/hyp/pgtable.c | 13 ++++++++++--- arch/arm64/mm/cache.S | 9 ++++----- 14 files changed, 70 insertions(+), 40 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 934b9be582d2..ed1cc9d8e6df 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void) #define gic_read_lpir(c) readq_relaxed(c) #define gic_write_lpir(v, c) writeq_relaxed(v, c) -#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) +#define gic_flush_dcache_to_poc(a,l) \ + __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l)) #define gits_read_baser(c) readq_relaxed(c) #define gits_write_baser(v, c) writeq_relaxed(v, c) diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 157234706817..695f88864784 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -50,15 +50,15 @@ * - start - virtual start address * - end - virtual end address * - * __flush_dcache_area(kaddr, size) + * __flush_dcache_area(start, end) * * Ensure that the data held in page is written back. 
- * - kaddr - page address - * - size - region size + * - start - virtual start address + * - end - virtual end address */ extern void __flush_icache_range(unsigned long start, unsigned long end); extern void invalidate_icache_range(unsigned long start, unsigned long end); -extern void __flush_dcache_area(void *addr, size_t len); +extern void __flush_dcache_area(unsigned long start, unsigned long end); extern void __inval_dcache_area(unsigned long start, unsigned long end); extern void __clean_dcache_area_poc(void *addr, size_t len); extern void __clean_dcache_area_pop(void *addr, size_t len); diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 3578aba9c608..0ae2397076fd 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -137,7 +137,7 @@ void efi_virtmap_unload(void); static inline void efi_capsule_flush_cache_range(void *addr, int size) { - __flush_dcache_area(addr, size); + __flush_dcache_area((unsigned long)addr, (unsigned long)addr + size); } #endif /* _ASM_EFI_H */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 25ed956f9af1..33293d5855af 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base, struct kvm; -#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) +#define kvm_flush_dcache_to_poc(a,l) \ + __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l)) static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index b1cef371df2b..b40ddce71507 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length, return 0; } -#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start)) - #ifdef CONFIG_ARM64_MTE static DEFINE_XARRAY(mte_pages); @@ -383,13 +381,18 @@ int swsusp_arch_suspend(void) ret = swsusp_save(); } else { /* Clean kernel core startup/idle code to PoC*/ - dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end); - dcache_clean_range(__idmap_text_start, __idmap_text_end); + __flush_dcache_area((unsigned long)__mmuoff_data_start, + (unsigned long)__mmuoff_data_end); + __flush_dcache_area((unsigned long)__idmap_text_start, + (unsigned long)__idmap_text_end); /* Clean kvm setup code to PoC? */ if (el2_reset_needed()) { - dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); - dcache_clean_range(__hyp_text_start, __hyp_text_end); + __flush_dcache_area( + (unsigned long)__hyp_idmap_text_start, + (unsigned long)__hyp_idmap_text_end); + __flush_dcache_area((unsigned long)__hyp_text_start, + (unsigned long)__hyp_text_end); } swsusp_mte_restore_tags(); @@ -474,7 +477,8 @@ int swsusp_arch_resume(void) * The hibernate exit text contains a set of el2 vectors, that will * be executed at el2 with the mmu off in order to reload hyp-stub. 
*/ - __flush_dcache_area(hibernate_exit, exit_size); + __flush_dcache_area((unsigned long)hibernate_exit, + (unsigned long)hibernate_exit + exit_size); /* * KASLR will cause the el2 vectors to be in a different location in diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index e628c8ce1ffe..3dd515baf526 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -237,7 +237,8 @@ asmlinkage void __init init_feature_override(void) for (i = 0; i < ARRAY_SIZE(regs); i++) { if (regs[i]->override) - __flush_dcache_area(regs[i]->override, + __flush_dcache_area((unsigned long)regs[i]->override, + (unsigned long)regs[i]->override + sizeof(*regs[i]->override)); } } diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 341342b207f6..49cccd03cb37 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void) * we end up running with module randomization disabled. */ module_alloc_base = (u64)_etext - MODULES_VSIZE; - __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); + __flush_dcache_area((unsigned long)&module_alloc_base, + (unsigned long)&module_alloc_base + + sizeof(module_alloc_base)); /* * Try to map the FDT early. If this fails, we simply bail, @@ -170,8 +172,12 @@ u64 __init kaslr_early_init(void) module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; module_alloc_base &= PAGE_MASK; - __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); - __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed)); + __flush_dcache_area((unsigned long)&module_alloc_base, + (unsigned long)&module_alloc_base + + sizeof(module_alloc_base)); + __flush_dcache_area((unsigned long)&memstart_offset_seed, + (unsigned long)&memstart_offset_seed + + sizeof(memstart_offset_seed)); return offset; } diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index a03944fd0cd4..3e79110c8f3a 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -72,7 +72,9 @@ int machine_kexec_post_load(struct kimage *kimage) * For execution with the MMU off, reloc_code needs to be cleaned to the * PoC and invalidated from the I-cache. */ - __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size); + __flush_dcache_area((unsigned long)reloc_code, + (unsigned long)reloc_code + + arm64_relocate_new_kernel_size); invalidate_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code + arm64_relocate_new_kernel_size); @@ -106,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage) for (entry = &kimage->head; ; entry++) { unsigned int flag; - void *addr; + unsigned long addr; /* flush the list entries. */ - __flush_dcache_area(entry, sizeof(kimage_entry_t)); + __flush_dcache_area((unsigned long)entry, + (unsigned long)entry + + sizeof(kimage_entry_t)); flag = *entry & IND_FLAGS; if (flag == IND_DONE) break; - addr = phys_to_virt(*entry & PAGE_MASK); + addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK); switch (flag) { case IND_INDIRECTION: @@ -124,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage) break; case IND_SOURCE: /* flush the source pages. 
*/ - __flush_dcache_area(addr, PAGE_SIZE); + __flush_dcache_area(addr, addr + PAGE_SIZE); break; case IND_DESTINATION: break; @@ -151,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage) kimage->segment[i].memsz, kimage->segment[i].memsz / PAGE_SIZE); - __flush_dcache_area(phys_to_virt(kimage->segment[i].mem), - kimage->segment[i].memsz); + __flush_dcache_area( + (unsigned long)phys_to_virt(kimage->segment[i].mem), + (unsigned long)phys_to_virt(kimage->segment[i].mem) + + kimage->segment[i].memsz); } } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index dcd7041b2b07..5fcdee331087 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -122,7 +122,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) secondary_data.task = idle; secondary_data.stack = task_stack_page(idle) + THREAD_SIZE; update_cpu_boot_status(CPU_MMU_OFF); - __flush_dcache_area(&secondary_data, sizeof(secondary_data)); + __flush_dcache_area((unsigned long)&secondary_data, + (unsigned long)&secondary_data + + sizeof(secondary_data)); /* Now bring the CPU into our world */ ret = boot_secondary(cpu, idle); @@ -143,7 +145,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_crit("CPU%u: failed to come online\n", cpu); secondary_data.task = NULL; secondary_data.stack = NULL; - __flush_dcache_area(&secondary_data, sizeof(secondary_data)); + __flush_dcache_area((unsigned long)&secondary_data, + (unsigned long)&secondary_data + + sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); if (status == CPU_MMU_OFF) status = READ_ONCE(__early_cpu_boot_status); diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index c45a83512805..58d804582a35 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -36,7 +36,7 @@ static void write_pen_release(u64 val) unsigned long size = sizeof(secondary_holding_pen_release); secondary_holding_pen_release = val; - __flush_dcache_area(start, size); + __flush_dcache_area((unsigned long)start, (unsigned long)start + size); } @@ -90,8 +90,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) * the boot protocol. */ writeq_relaxed(pa_holding_pen, release_addr); - __flush_dcache_area((__force void *)release_addr, - sizeof(*release_addr)); + __flush_dcache_area((__force unsigned long)release_addr, + (__force unsigned long)release_addr + + sizeof(*release_addr)); /* * Send an event to wake up the secondary CPU. 
diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S index 3bcfa3cac46f..36cef6915428 100644 --- a/arch/arm64/kvm/hyp/nvhe/cache.S +++ b/arch/arm64/kvm/hyp/nvhe/cache.S @@ -8,7 +8,6 @@ #include SYM_FUNC_START_PI(__flush_dcache_area) - add x1, x0, x1 dcache_by_line_op civac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__flush_dcache_area) diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 7488f53b0aa2..5dffe928f256 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -134,7 +134,8 @@ static void update_nvhe_init_params(void) for (i = 0; i < hyp_nr_cpus; i++) { params = per_cpu_ptr(&kvm_init_params, i); params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd); - __flush_dcache_area(params, sizeof(*params)); + __flush_dcache_area((unsigned long)params, + (unsigned long)params + sizeof(*params)); } } diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index c37c1dc4feaf..10d2f04013d4 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -839,8 +839,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, stage2_put_pte(ptep, mmu, addr, level, mm_ops); if (need_flush) { - __flush_dcache_area(kvm_pte_follow(pte, mm_ops), - kvm_granule_size(level)); + kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops); + + __flush_dcache_area((unsigned long)pte_follow, + (unsigned long)pte_follow + + kvm_granule_size(level)); } if (childp) @@ -988,11 +991,15 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, struct kvm_pgtable *pgt = arg; struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops; kvm_pte_t pte = *ptep; + kvm_pte_t *pte_follow; if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte)) return 0; - __flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level)); + pte_follow = kvm_pte_follow(pte, mm_ops); + __flush_dcache_area((unsigned long)pte_follow, + (unsigned long)pte_follow + + kvm_granule_size(level)); return 0; } diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index fff883f691f2..b2880aeba7ca 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -99,16 +99,15 @@ alternative_else_nop_endif SYM_FUNC_END(invalidate_icache_range) /* - * __flush_dcache_area(kaddr, size) + * __flush_dcache_area(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned and invalidated to the PoC. * - * - kaddr - kernel address - * - size - size in question + * - start - virtual start address of region + * - end - virtual end address of region */ SYM_FUNC_START_PI(__flush_dcache_area) - add x1, x0, x1 dcache_by_line_op civac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__flush_dcache_area) -- cgit v1.2.3-59-g8ed1b From 1f42faf1d25de2ae239f322fda8af1c92c20e953 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:56 +0100 Subject: arm64: __clean_dcache_area_poc to take end parameter instead of size To be consistent with other functions with similar names and functionality in cacheflush.h, cache.S, and cachetlb.rst, change to specify the range in terms of start and end, as opposed to start and size. Because the code is shared with __dma_clean_area, it changes the parameters for that as well. However, __dma_clean_area is local to cache.S, so no other users are affected. No functional change intended. 
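The call-site conversion throughout this series is mechanical: every (addr, size) pair becomes (start, start + size). A minimal sketch, with a printf stub standing in for the real assembly routine and a hypothetical caller clean_buf():

	#include <stddef.h>
	#include <stdio.h>

	/* Stub standing in for the assembly routine in arch/arm64/mm/cache.S. */
	static void __clean_dcache_area_poc(unsigned long start, unsigned long end)
	{
		printf("clean to PoC: [%#lx, %#lx)\n", start, end);
	}

	/* Hypothetical (addr, size) caller after conversion. */
	static void clean_buf(void *addr, size_t len)
	{
		__clean_dcache_area_poc((unsigned long)addr,
					(unsigned long)addr + len);
	}

	int main(void)
	{
		char buf[128];

		clean_buf(buf, sizeof(buf));
		return 0;
	}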
Reported-by: Will Deacon Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-14-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/cacheflush.h | 2 +- arch/arm64/kernel/efi-entry.S | 5 +++-- arch/arm64/mm/cache.S | 16 +++++++--------- 3 files changed, 11 insertions(+), 12 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 695f88864784..3255878d6f30 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -60,7 +60,7 @@ extern void __flush_icache_range(unsigned long start, unsigned long end); extern void invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(unsigned long start, unsigned long end); extern void __inval_dcache_area(unsigned long start, unsigned long end); -extern void __clean_dcache_area_poc(void *addr, size_t len); +extern void __clean_dcache_area_poc(unsigned long start, unsigned long end); extern void __clean_dcache_area_pop(void *addr, size_t len); extern void __clean_dcache_area_pou(void *addr, size_t len); extern long __flush_cache_user_range(unsigned long start, unsigned long end); diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 0073b24b5d25..b0f728fb61f0 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -28,6 +28,7 @@ SYM_CODE_START(efi_enter_kernel) * stale icache entries from before relocation. */ ldr w1, =kernel_size + add x1, x0, x1 bl __clean_dcache_area_poc ic ialluis @@ -36,7 +37,7 @@ SYM_CODE_START(efi_enter_kernel) * so that we can safely disable the MMU and caches. */ adr x0, 0f - ldr w1, 3f + adr x1, 3f bl __clean_dcache_area_poc 0: /* Turn off Dcache and MMU */ @@ -64,5 +65,5 @@ SYM_CODE_START(efi_enter_kernel) mov x2, xzr mov x3, xzr br x19 +3: SYM_CODE_END(efi_enter_kernel) -3: .long . - 0b diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index b2880aeba7ca..e2e2740c55ce 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -171,24 +171,23 @@ SYM_FUNC_END_PI(__inval_dcache_area) SYM_FUNC_END(__dma_inv_area) /* - * __clean_dcache_area_poc(kaddr, size) + * __clean_dcache_area_poc(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoC. 
* - * - kaddr - kernel address - * - size - size in question + * - start - virtual start address of region + * - end - virtual end address of region */ SYM_FUNC_START_LOCAL(__dma_clean_area) SYM_FUNC_START_PI(__clean_dcache_area_poc) /* FALLTHROUGH */ /* - * __dma_clean_area(start, size) + * __dma_clean_area(start, end) * - start - virtual start address of region - * - size - size in question + * - end - virtual end address of region */ - add x1, x0, x1 dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__clean_dcache_area_poc) @@ -204,10 +203,10 @@ SYM_FUNC_END(__dma_clean_area) * - size - size in question */ SYM_FUNC_START_PI(__clean_dcache_area_pop) + add x1, x0, x1 alternative_if_not ARM64_HAS_DCPOP b __clean_dcache_area_poc alternative_else_nop_endif - add x1, x0, x1 dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret SYM_FUNC_END_PI(__clean_dcache_area_pop) @@ -236,7 +235,6 @@ SYM_FUNC_START_PI(__dma_map_area) add x1, x0, x1 cmp w2, #DMA_FROM_DEVICE b.eq __dma_inv_area - sub x1, x1, x0 b __dma_clean_area SYM_FUNC_END_PI(__dma_map_area) -- cgit v1.2.3-59-g8ed1b From f749448edb9c98bece0aeec5536260a8794af24b Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:57 +0100 Subject: arm64: __clean_dcache_area_pop to take end parameter instead of size To be consistent with other functions with similar names and functionality in cacheflush.h, cache.S, and cachetlb.rst, change to specify the range in terms of start and end, as opposed to start and size. No functional change intended. Reported-by: Will Deacon Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-15-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/cacheflush.h | 2 +- arch/arm64/lib/uaccess_flushcache.c | 4 ++-- arch/arm64/mm/cache.S | 9 ++++----- arch/arm64/mm/flush.c | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 3255878d6f30..fa5641868d65 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -61,7 +61,7 @@ extern void invalidate_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(unsigned long start, unsigned long end); extern void __inval_dcache_area(unsigned long start, unsigned long end); extern void __clean_dcache_area_poc(unsigned long start, unsigned long end); -extern void __clean_dcache_area_pop(void *addr, size_t len); +extern void __clean_dcache_area_pop(unsigned long start, unsigned long end); extern void __clean_dcache_area_pou(void *addr, size_t len); extern long __flush_cache_user_range(unsigned long start, unsigned long end); extern void sync_icache_aliases(void *kaddr, unsigned long len); diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c index c83bb5a4aad2..62ea989effe8 100644 --- a/arch/arm64/lib/uaccess_flushcache.c +++ b/arch/arm64/lib/uaccess_flushcache.c @@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt) * barrier to order the cache maintenance against the memcpy. 
*/ memcpy(dst, src, cnt); - __clean_dcache_area_pop(dst, cnt); + __clean_dcache_area_pop((unsigned long)dst, (unsigned long)dst + cnt); } EXPORT_SYMBOL_GPL(memcpy_flushcache); @@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from, rc = raw_copy_from_user(to, from, n); /* See above */ - __clean_dcache_area_pop(to, n - rc); + __clean_dcache_area_pop((unsigned long)to, (unsigned long)to + n - rc); return rc; } diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index e2e2740c55ce..b71fcf56516b 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -194,16 +194,15 @@ SYM_FUNC_END_PI(__clean_dcache_area_poc) SYM_FUNC_END(__dma_clean_area) /* - * __clean_dcache_area_pop(kaddr, size) + * __clean_dcache_area_pop(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoP. * - * - kaddr - kernel address - * - size - size in question + * - start - virtual start address of region + * - end - virtual end address of region */ SYM_FUNC_START_PI(__clean_dcache_area_pop) - add x1, x0, x1 alternative_if_not ARM64_HAS_DCPOP b __clean_dcache_area_poc alternative_else_nop_endif diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index be650b573b2a..b2c226d93ca5 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -84,7 +84,7 @@ void arch_wb_cache_pmem(void *addr, size_t size) { /* Ensure order against any prior non-cacheable writes */ dmb(osh); - __clean_dcache_area_pop(addr, size); + __clean_dcache_area_pop((unsigned long)addr, (unsigned long)addr + size); } EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); -- cgit v1.2.3-59-g8ed1b From 406d7d4e2bc76d38a6dc88733a0f72fabf02d305 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:58 +0100 Subject: arm64: __clean_dcache_area_pou to take end parameter instead of size To be consistent with other functions with similar names and functionality in cacheflush.h, cache.S, and cachetlb.rst, change to specify the range in terms of start and end, as opposed to start and size. No functional change intended. 
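For context, the PoU clean retains its fast path: on CPUs with CTR_EL0.IDC set (the ARM64_HAS_CACHE_IDC alternative), instruction fetch does not require a D-cache clean to the PoU, so the whole walk collapses to a store barrier. Roughly, as a sketch with stand-in helpers rather than kernel code:

	#include <stdio.h>

	static int have_idc;	/* stands in for the ARM64_HAS_CACHE_IDC alternative */

	static void dsb_ishst(void)
	{
		/* barrier only; nothing to model in this sketch */
	}

	static void clean_line_to_pou(unsigned long addr)
	{
		printf("dc cvau, %#lx\n", addr);
	}

	static void clean_dcache_pou(unsigned long start, unsigned long end)
	{
		if (have_idc) {
			dsb_ishst();
			return;
		}
		for (start &= ~63UL; start < end; start += 64)
			clean_line_to_pou(start);
	}

	int main(void)
	{
		clean_dcache_pou(0x1000, 0x1080);	/* two line ops  */
		have_idc = 1;
		clean_dcache_pou(0x1000, 0x1080);	/* barrier only  */
		return 0;
	}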
Reported-by: Will Deacon Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-16-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/cacheflush.h | 2 +- arch/arm64/mm/cache.S | 9 ++++----- arch/arm64/mm/flush.c | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index fa5641868d65..f86723047315 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -62,7 +62,7 @@ extern void __flush_dcache_area(unsigned long start, unsigned long end); extern void __inval_dcache_area(unsigned long start, unsigned long end); extern void __clean_dcache_area_poc(unsigned long start, unsigned long end); extern void __clean_dcache_area_pop(unsigned long start, unsigned long end); -extern void __clean_dcache_area_pou(void *addr, size_t len); +extern void __clean_dcache_area_pou(unsigned long start, unsigned long end); extern long __flush_cache_user_range(unsigned long start, unsigned long end); extern void sync_icache_aliases(void *kaddr, unsigned long len); diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index b71fcf56516b..ea605d94182f 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -113,20 +113,19 @@ SYM_FUNC_START_PI(__flush_dcache_area) SYM_FUNC_END_PI(__flush_dcache_area) /* - * __clean_dcache_area_pou(kaddr, size) + * __clean_dcache_area_pou(start, end) * - * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoU. * - * - kaddr - kernel address - * - size - size in question + * - start - virtual start address of region + * - end - virtual end address of region */ SYM_FUNC_START(__clean_dcache_area_pou) alternative_if ARM64_HAS_CACHE_IDC dsb ishst ret alternative_else_nop_endif - add x1, x0, x1 dcache_by_line_op cvau, ish, x0, x1, x2, x3 ret SYM_FUNC_END(__clean_dcache_area_pou) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index b2c226d93ca5..0341bcc6fdf3 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -19,7 +19,7 @@ void sync_icache_aliases(void *kaddr, unsigned long len) unsigned long addr = (unsigned long)kaddr; if (icache_is_aliasing()) { - __clean_dcache_area_pou(kaddr, len); + __clean_dcache_area_pou(kaddr, kaddr + len); __flush_icache_all(); } else { /* -- cgit v1.2.3-59-g8ed1b From 8c28d52ccd1d6e3a5aca8a37e465a5f8b77edbc1 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:29:59 +0100 Subject: arm64: sync_icache_aliases to take end parameter instead of size To be consistent with other functions with similar names and functionality in cacheflush.h, cache.S, and cachetlb.rst, change to specify the range in terms of start and end, as opposed to start and size. No functional change intended. 
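One notable caller, visible at the end of this diff, is __sync_icache_dcache(): the sync is lazy, paid at most once per page and tracked with the PG_dcache_clean page flag. The pattern, reduced to a toy with a boolean in place of the page flag bitops:

	#include <stdbool.h>
	#include <stdio.h>

	struct page {
		bool dcache_clean;	/* stands in for PG_dcache_clean */
	};

	static void sync_icache_aliases(unsigned long start, unsigned long end)
	{
		printf("sync I/D for [%#lx, %#lx)\n", start, end);
	}

	static void sync_page_once(struct page *page, unsigned long addr,
				   unsigned long size)
	{
		if (!page->dcache_clean) {
			sync_icache_aliases(addr, addr + size);
			page->dcache_clean = true;
		}
	}

	int main(void)
	{
		struct page page = { false };

		sync_page_once(&page, 0x1000, 0x1000);	/* does the work */
		sync_page_once(&page, 0x1000, 0x1000);	/* no-op         */
		return 0;
	}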
Reported-by: Will Deacon Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-17-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/cacheflush.h | 2 +- arch/arm64/kernel/probes/uprobes.c | 2 +- arch/arm64/mm/flush.c | 21 ++++++++++----------- 3 files changed, 12 insertions(+), 13 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index f86723047315..70b389a8dea5 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -64,7 +64,7 @@ extern void __clean_dcache_area_poc(unsigned long start, unsigned long end); extern void __clean_dcache_area_pop(unsigned long start, unsigned long end); extern void __clean_dcache_area_pou(unsigned long start, unsigned long end); extern long __flush_cache_user_range(unsigned long start, unsigned long end); -extern void sync_icache_aliases(void *kaddr, unsigned long len); +extern void sync_icache_aliases(unsigned long start, unsigned long end); static inline void flush_icache_range(unsigned long start, unsigned long end) { diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c index 2c247634552b..9be668f3f034 100644 --- a/arch/arm64/kernel/probes/uprobes.c +++ b/arch/arm64/kernel/probes/uprobes.c @@ -21,7 +21,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, memcpy(dst, src, len); /* flush caches (dcache/icache) */ - sync_icache_aliases(dst, len); + sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len); kunmap_atomic(xol_page_kaddr); } diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 0341bcc6fdf3..c4ca7e05fdb8 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -14,28 +14,25 @@ #include #include -void sync_icache_aliases(void *kaddr, unsigned long len) +void sync_icache_aliases(unsigned long start, unsigned long end) { - unsigned long addr = (unsigned long)kaddr; - if (icache_is_aliasing()) { - __clean_dcache_area_pou(kaddr, kaddr + len); + __clean_dcache_area_pou(start, end); __flush_icache_all(); } else { /* * Don't issue kick_all_cpus_sync() after I-cache invalidation * for user mappings. 
*/ - __flush_icache_range(addr, addr + len); + __flush_icache_range(start, end); } } -static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len) +static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start, + unsigned long end) { if (vma->vm_flags & VM_EXEC) - sync_icache_aliases(kaddr, len); + sync_icache_aliases(start, end); } /* @@ -48,7 +45,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long len) { memcpy(dst, src, len); - flush_ptrace_access(vma, page, uaddr, dst, len); + flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len); } void __sync_icache_dcache(pte_t pte) @@ -56,7 +53,9 @@ void __sync_icache_dcache(pte_t pte) struct page *page = pte_page(pte); if (!test_bit(PG_dcache_clean, &page->flags)) { - sync_icache_aliases(page_address(page), page_size(page)); + sync_icache_aliases((unsigned long)page_address(page), + (unsigned long)page_address(page) + + page_size(page)); set_bit(PG_dcache_clean, &page->flags); } } -- cgit v1.2.3-59-g8ed1b From fade9c2c6ee2baea7df8e6059b3f143c681e5ce4 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 24 May 2021 09:30:01 +0100 Subject: arm64: Rename arm64-internal cache maintenance functions Although naming across the codebase isn't that consistent, it tends to follow certain patterns. Moreover, the term "flush" isn't defined in the Arm Architecture reference manual, and might be interpreted to mean clean, invalidate, or both for a cache. Rename arm64-internal functions to make the naming internally consistent, as well as making it consistent with the Arm ARM, by specifying whether it applies to the instruction, data, or both caches, whether the operation is a clean, invalidate, or both. Also specify which point the operation applies to, i.e., to the point of unification (PoU), coherency (PoC), or persistence (PoP). This commit applies the following sed transformation to all files under arch/arm64: "s/\b__flush_cache_range\b/caches_clean_inval_pou_macro/g;"\ "s/\b__flush_icache_range\b/caches_clean_inval_pou/g;"\ "s/\binvalidate_icache_range\b/icache_inval_pou/g;"\ "s/\b__flush_dcache_area\b/dcache_clean_inval_poc/g;"\ "s/\b__inval_dcache_area\b/dcache_inval_poc/g;"\ "s/__clean_dcache_area_poc\b/dcache_clean_poc/g;"\ "s/\b__clean_dcache_area_pop\b/dcache_clean_pop/g;"\ "s/\b__clean_dcache_area_pou\b/dcache_clean_pou/g;"\ "s/\b__flush_cache_user_range\b/caches_clean_inval_user_pou/g;"\ "s/\b__flush_icache_all\b/icache_inval_all_pou/g;" Note that __clean_dcache_area_poc is deliberately missing a word boundary check at the beginning in order to match the efistub symbols in image-vars.h. Also note that, despite its name, __flush_icache_range operates on both instruction and data caches. The name change here reflects that. No functional change intended. 
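Out-of-tree callers tracking this rename could carry a temporary compatibility shim built from the sed mapping above. This is not something the series adds, only a sketch of a hypothetical header expressing the old-name to new-name correspondence:

	#ifndef __CACHE_COMPAT_H	/* hypothetical out-of-tree header */
	#define __CACHE_COMPAT_H

	#define __flush_icache_range(s, e)	caches_clean_inval_pou(s, e)
	#define invalidate_icache_range(s, e)	icache_inval_pou(s, e)
	#define __flush_dcache_area(s, e)	dcache_clean_inval_poc(s, e)
	#define __inval_dcache_area(s, e)	dcache_inval_poc(s, e)
	#define __clean_dcache_area_poc(s, e)	dcache_clean_poc(s, e)
	#define __clean_dcache_area_pop(s, e)	dcache_clean_pop(s, e)
	#define __clean_dcache_area_pou(s, e)	dcache_clean_pou(s, e)
	#define __flush_cache_user_range(s, e)	caches_clean_inval_user_pou(s, e)
	#define __flush_icache_all()		icache_inval_all_pou()

	#endif /* __CACHE_COMPAT_H */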
Acked-by: Mark Rutland Signed-off-by: Fuad Tabba Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210524083001.2586635-19-tabba@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/arch_gicv3.h | 2 +- arch/arm64/include/asm/cacheflush.h | 36 +++++++++++------------ arch/arm64/include/asm/efi.h | 2 +- arch/arm64/include/asm/kvm_mmu.h | 6 ++-- arch/arm64/kernel/alternative.c | 2 +- arch/arm64/kernel/efi-entry.S | 4 +-- arch/arm64/kernel/head.S | 8 ++--- arch/arm64/kernel/hibernate-asm.S | 4 +-- arch/arm64/kernel/hibernate.c | 12 ++++---- arch/arm64/kernel/idreg-override.c | 2 +- arch/arm64/kernel/image-vars.h | 2 +- arch/arm64/kernel/insn.c | 2 +- arch/arm64/kernel/kaslr.c | 6 ++-- arch/arm64/kernel/machine_kexec.c | 10 +++---- arch/arm64/kernel/smp.c | 4 +-- arch/arm64/kernel/smp_spin_table.c | 4 +-- arch/arm64/kernel/sys_compat.c | 2 +- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/hyp/nvhe/cache.S | 4 +-- arch/arm64/kvm/hyp/nvhe/setup.c | 2 +- arch/arm64/kvm/hyp/nvhe/tlb.c | 2 +- arch/arm64/kvm/hyp/pgtable.c | 4 +-- arch/arm64/lib/uaccess_flushcache.c | 4 +-- arch/arm64/mm/cache.S | 58 ++++++++++++++++++------------------- arch/arm64/mm/flush.c | 12 ++++---- 25 files changed, 98 insertions(+), 98 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index ed1cc9d8e6df..4ad22c3135db 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -125,7 +125,7 @@ static inline u32 gic_read_rpr(void) #define gic_write_lpir(v, c) writeq_relaxed(v, c) #define gic_flush_dcache_to_poc(a,l) \ - __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l)) + dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l)) #define gits_read_baser(c) readq_relaxed(c) #define gits_write_baser(v, c) writeq_relaxed(v, c) diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 26617df1fa45..543c997eb3b7 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -34,54 +34,54 @@ * - start - virtual start address (inclusive) * - end - virtual end address (exclusive) * - * __flush_icache_range(start, end) + * caches_clean_inval_pou(start, end) * * Ensure coherency between the I-cache and the D-cache region to * the Point of Unification. * - * __flush_cache_user_range(start, end) + * caches_clean_inval_user_pou(start, end) * * Ensure coherency between the I-cache and the D-cache region to * the Point of Unification. * Use only if the region might access user memory. * - * invalidate_icache_range(start, end) + * icache_inval_pou(start, end) * * Invalidate I-cache region to the Point of Unification. * - * __flush_dcache_area(start, end) + * dcache_clean_inval_poc(start, end) * * Clean and invalidate D-cache region to the Point of Coherency. * - * __inval_dcache_area(start, end) + * dcache_inval_poc(start, end) * * Invalidate D-cache region to the Point of Coherency. * - * __clean_dcache_area_poc(start, end) + * dcache_clean_poc(start, end) * * Clean D-cache region to the Point of Coherency. * - * __clean_dcache_area_pop(start, end) + * dcache_clean_pop(start, end) * * Clean D-cache region to the Point of Persistence. * - * __clean_dcache_area_pou(start, end) + * dcache_clean_pou(start, end) * * Clean D-cache region to the Point of Unification. 
*/ -extern void __flush_icache_range(unsigned long start, unsigned long end); -extern void invalidate_icache_range(unsigned long start, unsigned long end); -extern void __flush_dcache_area(unsigned long start, unsigned long end); -extern void __inval_dcache_area(unsigned long start, unsigned long end); -extern void __clean_dcache_area_poc(unsigned long start, unsigned long end); -extern void __clean_dcache_area_pop(unsigned long start, unsigned long end); -extern void __clean_dcache_area_pou(unsigned long start, unsigned long end); -extern long __flush_cache_user_range(unsigned long start, unsigned long end); +extern void caches_clean_inval_pou(unsigned long start, unsigned long end); +extern void icache_inval_pou(unsigned long start, unsigned long end); +extern void dcache_clean_inval_poc(unsigned long start, unsigned long end); +extern void dcache_inval_poc(unsigned long start, unsigned long end); +extern void dcache_clean_poc(unsigned long start, unsigned long end); +extern void dcache_clean_pop(unsigned long start, unsigned long end); +extern void dcache_clean_pou(unsigned long start, unsigned long end); +extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end); extern void sync_icache_aliases(unsigned long start, unsigned long end); static inline void flush_icache_range(unsigned long start, unsigned long end) { - __flush_icache_range(start, end); + caches_clean_inval_pou(start, end); /* * IPI all online CPUs so that they undergo a context synchronization @@ -135,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); -static __always_inline void __flush_icache_all(void) +static __always_inline void icache_inval_all_pou(void) { if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) return; diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 0ae2397076fd..1bed37eb013a 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -137,7 +137,7 @@ void efi_virtmap_unload(void); static inline void efi_capsule_flush_cache_range(void *addr, int size) { - __flush_dcache_area((unsigned long)addr, (unsigned long)addr + size); + dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size); } #endif /* _ASM_EFI_H */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 33293d5855af..f4cbfa9025a8 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -181,7 +181,7 @@ static inline void *__kvm_vector_slot2addr(void *base, struct kvm; #define kvm_flush_dcache_to_poc(a,l) \ - __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l)) + dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l)) static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { @@ -209,12 +209,12 @@ static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn, { if (icache_is_aliasing()) { /* any kind of VIPT cache */ - __flush_icache_all(); + icache_inval_all_pou(); } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) { /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */ void *va = page_address(pfn_to_page(pfn)); - invalidate_icache_range((unsigned long)va, + icache_inval_pou((unsigned long)va, (unsigned long)va + size); } } diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index c906d20c7b52..3fb79b76e9d9 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -181,7 +181,7 
@@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu */ if (!is_module) { dsb(ish); - __flush_icache_all(); + icache_inval_all_pou(); isb(); /* Ignore ARM64_CB bit from feature mask */ diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index b0f728fb61f0..61a87fa1c305 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S @@ -29,7 +29,7 @@ SYM_CODE_START(efi_enter_kernel) */ ldr w1, =kernel_size add x1, x0, x1 - bl __clean_dcache_area_poc + bl dcache_clean_poc ic ialluis /* @@ -38,7 +38,7 @@ SYM_CODE_START(efi_enter_kernel) */ adr x0, 0f adr x1, 3f - bl __clean_dcache_area_poc + bl dcache_clean_poc 0: /* Turn off Dcache and MMU */ mrs x0, CurrentEL diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8df0ac8d9123..6928cb67d3a0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -118,7 +118,7 @@ SYM_CODE_START_LOCAL(preserve_boot_args) // MMU off add x1, x0, #0x20 // 4 x 8 bytes - b __inval_dcache_area // tail call + b dcache_inval_poc // tail call SYM_CODE_END(preserve_boot_args) /* @@ -268,7 +268,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables) */ adrp x0, init_pg_dir adrp x1, init_pg_end - bl __inval_dcache_area + bl dcache_inval_poc /* * Clear the init page tables. @@ -381,11 +381,11 @@ SYM_FUNC_START_LOCAL(__create_page_tables) adrp x0, idmap_pg_dir adrp x1, idmap_pg_end - bl __inval_dcache_area + bl dcache_inval_poc adrp x0, init_pg_dir adrp x1, init_pg_end - bl __inval_dcache_area + bl dcache_inval_poc ret x28 SYM_FUNC_END(__create_page_tables) diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index ef2ab7caf815..81c0186a5e32 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -45,7 +45,7 @@ * Because this code has to be copied to a 'safe' page, it can't call out to * other functions by PC-relative address. Also remember that it may be * mid-way through over-writing other functions. For this reason it contains - * code from __flush_icache_range() and uses the copy_page() macro. + * code from caches_clean_inval_pou() and uses the copy_page() macro. * * This 'safe' page is mapped via ttbr0, and executed from there. 
This function * switches to a copy of the linear map in ttbr1, performs the restore, then @@ -87,7 +87,7 @@ SYM_CODE_START(swsusp_arch_suspend_exit) copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 add x1, x10, #PAGE_SIZE - /* Clean the copied page to PoU - based on __flush_icache_range() */ + /* Clean the copied page to PoU - based on caches_clean_inval_pou() */ raw_dcache_line_size x2, x3 sub x3, x2, #1 bic x4, x10, x3 diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index b40ddce71507..46a0b4d6e251 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -210,7 +210,7 @@ static int create_safe_exec_page(void *src_start, size_t length, return -ENOMEM; memcpy(page, src_start, length); - __flush_icache_range((unsigned long)page, (unsigned long)page + length); + caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length); rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page); if (rc) return rc; @@ -381,17 +381,17 @@ int swsusp_arch_suspend(void) ret = swsusp_save(); } else { /* Clean kernel core startup/idle code to PoC*/ - __flush_dcache_area((unsigned long)__mmuoff_data_start, + dcache_clean_inval_poc((unsigned long)__mmuoff_data_start, (unsigned long)__mmuoff_data_end); - __flush_dcache_area((unsigned long)__idmap_text_start, + dcache_clean_inval_poc((unsigned long)__idmap_text_start, (unsigned long)__idmap_text_end); /* Clean kvm setup code to PoC? */ if (el2_reset_needed()) { - __flush_dcache_area( + dcache_clean_inval_poc( (unsigned long)__hyp_idmap_text_start, (unsigned long)__hyp_idmap_text_end); - __flush_dcache_area((unsigned long)__hyp_text_start, + dcache_clean_inval_poc((unsigned long)__hyp_text_start, (unsigned long)__hyp_text_end); } @@ -477,7 +477,7 @@ int swsusp_arch_resume(void) * The hibernate exit text contains a set of el2 vectors, that will * be executed at el2 with the mmu off in order to reload hyp-stub. 
*/ - __flush_dcache_area((unsigned long)hibernate_exit, + dcache_clean_inval_poc((unsigned long)hibernate_exit, (unsigned long)hibernate_exit + exit_size); /* diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 3dd515baf526..53a381a7f65d 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -237,7 +237,7 @@ asmlinkage void __init init_feature_override(void) for (i = 0; i < ARRAY_SIZE(regs); i++) { if (regs[i]->override) - __flush_dcache_area((unsigned long)regs[i]->override, + dcache_clean_inval_poc((unsigned long)regs[i]->override, (unsigned long)regs[i]->override + sizeof(*regs[i]->override)); } diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index bcf3c2755370..c96a9a0043bf 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -35,7 +35,7 @@ __efistub_strnlen = __pi_strnlen; __efistub_strcmp = __pi_strcmp; __efistub_strncmp = __pi_strncmp; __efistub_strrchr = __pi_strrchr; -__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc; +__efistub_dcache_clean_poc = __pi_dcache_clean_poc; #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) __efistub___memcpy = __pi_memcpy; diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 6c0de2f60ea9..51cb8dc98d00 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -198,7 +198,7 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) ret = aarch64_insn_write(tp, insn); if (ret == 0) - __flush_icache_range((uintptr_t)tp, + caches_clean_inval_pou((uintptr_t)tp, (uintptr_t)tp + AARCH64_INSN_SIZE); return ret; diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 49cccd03cb37..cfa2cfde3019 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -72,7 +72,7 @@ u64 __init kaslr_early_init(void) * we end up running with module randomization disabled. */ module_alloc_base = (u64)_etext - MODULES_VSIZE; - __flush_dcache_area((unsigned long)&module_alloc_base, + dcache_clean_inval_poc((unsigned long)&module_alloc_base, (unsigned long)&module_alloc_base + sizeof(module_alloc_base)); @@ -172,10 +172,10 @@ u64 __init kaslr_early_init(void) module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; module_alloc_base &= PAGE_MASK; - __flush_dcache_area((unsigned long)&module_alloc_base, + dcache_clean_inval_poc((unsigned long)&module_alloc_base, (unsigned long)&module_alloc_base + sizeof(module_alloc_base)); - __flush_dcache_area((unsigned long)&memstart_offset_seed, + dcache_clean_inval_poc((unsigned long)&memstart_offset_seed, (unsigned long)&memstart_offset_seed + sizeof(memstart_offset_seed)); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 3e79110c8f3a..03ceabe4d912 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -72,10 +72,10 @@ int machine_kexec_post_load(struct kimage *kimage) * For execution with the MMU off, reloc_code needs to be cleaned to the * PoC and invalidated from the I-cache. */ - __flush_dcache_area((unsigned long)reloc_code, + dcache_clean_inval_poc((unsigned long)reloc_code, (unsigned long)reloc_code + arm64_relocate_new_kernel_size); - invalidate_icache_range((uintptr_t)reloc_code, + icache_inval_pou((uintptr_t)reloc_code, (uintptr_t)reloc_code + arm64_relocate_new_kernel_size); @@ -111,7 +111,7 @@ static void kexec_list_flush(struct kimage *kimage) unsigned long addr; /* flush the list entries. 
*/ - __flush_dcache_area((unsigned long)entry, + dcache_clean_inval_poc((unsigned long)entry, (unsigned long)entry + sizeof(kimage_entry_t)); @@ -128,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage) break; case IND_SOURCE: /* flush the source pages. */ - __flush_dcache_area(addr, addr + PAGE_SIZE); + dcache_clean_inval_poc(addr, addr + PAGE_SIZE); break; case IND_DESTINATION: break; @@ -155,7 +155,7 @@ static void kexec_segment_flush(const struct kimage *kimage) kimage->segment[i].memsz, kimage->segment[i].memsz / PAGE_SIZE); - __flush_dcache_area( + dcache_clean_inval_poc( (unsigned long)phys_to_virt(kimage->segment[i].mem), (unsigned long)phys_to_virt(kimage->segment[i].mem) + kimage->segment[i].memsz); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 5fcdee331087..9b4c1118194d 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -122,7 +122,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) secondary_data.task = idle; secondary_data.stack = task_stack_page(idle) + THREAD_SIZE; update_cpu_boot_status(CPU_MMU_OFF); - __flush_dcache_area((unsigned long)&secondary_data, + dcache_clean_inval_poc((unsigned long)&secondary_data, (unsigned long)&secondary_data + sizeof(secondary_data)); @@ -145,7 +145,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_crit("CPU%u: failed to come online\n", cpu); secondary_data.task = NULL; secondary_data.stack = NULL; - __flush_dcache_area((unsigned long)&secondary_data, + dcache_clean_inval_poc((unsigned long)&secondary_data, (unsigned long)&secondary_data + sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 58d804582a35..7e1624ecab3c 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -36,7 +36,7 @@ static void write_pen_release(u64 val) unsigned long size = sizeof(secondary_holding_pen_release); secondary_holding_pen_release = val; - __flush_dcache_area((unsigned long)start, (unsigned long)start + size); + dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size); } @@ -90,7 +90,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu) * the boot protocol. 
*/ writeq_relaxed(pa_holding_pen, release_addr); - __flush_dcache_area((__force unsigned long)release_addr, + dcache_clean_inval_poc((__force unsigned long)release_addr, (__force unsigned long)release_addr + sizeof(*release_addr)); diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 265fe3eb1069..db5159a3055f 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -41,7 +41,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end) dsb(ish); } - ret = __flush_cache_user_range(start, start + chunk); + ret = caches_clean_inval_user_pou(start, start + chunk); if (ret) return ret; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 1cb39c0803a4..c1953f65ca0e 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1064,7 +1064,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) stage2_unmap_vm(vcpu->kvm); else - __flush_icache_all(); + icache_inval_all_pou(); } vcpu_reset_hcr(vcpu); diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S index 36cef6915428..958734f4d6b0 100644 --- a/arch/arm64/kvm/hyp/nvhe/cache.S +++ b/arch/arm64/kvm/hyp/nvhe/cache.S @@ -7,7 +7,7 @@ #include #include -SYM_FUNC_START_PI(__flush_dcache_area) +SYM_FUNC_START_PI(dcache_clean_inval_poc) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret -SYM_FUNC_END_PI(__flush_dcache_area) +SYM_FUNC_END_PI(dcache_clean_inval_poc) diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 5dffe928f256..8143ebd4fb72 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -134,7 +134,7 @@ static void update_nvhe_init_params(void) for (i = 0; i < hyp_nr_cpus; i++) { params = per_cpu_ptr(&kvm_init_params, i); params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd); - __flush_dcache_area((unsigned long)params, + dcache_clean_inval_poc((unsigned long)params, (unsigned long)params + sizeof(*params)); } } diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index 83dc3b271bc5..38ed0f6f2703 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -104,7 +104,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, * you should be running with VHE enabled. 
*/ if (icache_is_vpipt()) - __flush_icache_all(); + icache_inval_all_pou(); __tlb_switch_to_host(&cxt); } diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 10d2f04013d4..e9ad7fb28ee3 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -841,7 +841,7 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, if (need_flush) { kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops); - __flush_dcache_area((unsigned long)pte_follow, + dcache_clean_inval_poc((unsigned long)pte_follow, (unsigned long)pte_follow + kvm_granule_size(level)); } @@ -997,7 +997,7 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, return 0; pte_follow = kvm_pte_follow(pte, mm_ops); - __flush_dcache_area((unsigned long)pte_follow, + dcache_clean_inval_poc((unsigned long)pte_follow, (unsigned long)pte_follow + kvm_granule_size(level)); return 0; diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c index 62ea989effe8..baee22961bdb 100644 --- a/arch/arm64/lib/uaccess_flushcache.c +++ b/arch/arm64/lib/uaccess_flushcache.c @@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt) * barrier to order the cache maintenance against the memcpy. */ memcpy(dst, src, cnt); - __clean_dcache_area_pop((unsigned long)dst, (unsigned long)dst + cnt); + dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt); } EXPORT_SYMBOL_GPL(memcpy_flushcache); @@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from, rc = raw_copy_from_user(to, from, n); /* See above */ - __clean_dcache_area_pop((unsigned long)to, (unsigned long)to + n - rc); + dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc); return rc; } diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index ea605d94182f..5051b3c1a4f1 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -15,7 +15,7 @@ #include /* - * __flush_cache_range(start,end) [fixup] + * caches_clean_inval_pou_macro(start,end) [fixup] * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, @@ -25,7 +25,7 @@ * - end - virtual end address of region * - fixup - optional label to branch to on user fault */ -.macro __flush_cache_range, fixup +.macro caches_clean_inval_pou_macro, fixup alternative_if ARM64_HAS_CACHE_IDC dsb ishst b .Ldc_skip_\@ @@ -43,7 +43,7 @@ alternative_else_nop_endif .endm /* - * __flush_icache_range(start,end) + * caches_clean_inval_pou(start,end) * * Ensure that the I and D caches are coherent within specified region. * This is typically used when code has been written to a memory region, @@ -52,13 +52,13 @@ alternative_else_nop_endif * - start - virtual start address of region * - end - virtual end address of region */ -SYM_FUNC_START(__flush_icache_range) - __flush_cache_range +SYM_FUNC_START(caches_clean_inval_pou) + caches_clean_inval_pou_macro ret -SYM_FUNC_END(__flush_icache_range) +SYM_FUNC_END(caches_clean_inval_pou) /* - * __flush_cache_user_range(start,end) + * caches_clean_inval_user_pou(start,end) * * Ensure that the I and D caches are coherent within specified region. 
* This is typically used when code has been written to a memory region, @@ -67,10 +67,10 @@ SYM_FUNC_END(__flush_icache_range) * - start - virtual start address of region * - end - virtual end address of region */ -SYM_FUNC_START(__flush_cache_user_range) +SYM_FUNC_START(caches_clean_inval_user_pou) uaccess_ttbr0_enable x2, x3, x4 - __flush_cache_range 2f + caches_clean_inval_pou_macro 2f mov x0, xzr 1: uaccess_ttbr0_disable x1, x2 @@ -78,17 +78,17 @@ SYM_FUNC_START(__flush_cache_user_range) 2: mov x0, #-EFAULT b 1b -SYM_FUNC_END(__flush_cache_user_range) +SYM_FUNC_END(caches_clean_inval_user_pou) /* - * invalidate_icache_range(start,end) + * icache_inval_pou(start,end) * * Ensure that the I cache is invalid within specified region. * * - start - virtual start address of region * - end - virtual end address of region */ -SYM_FUNC_START(invalidate_icache_range) +SYM_FUNC_START(icache_inval_pou) alternative_if ARM64_HAS_CACHE_DIC isb ret @@ -96,10 +96,10 @@ alternative_else_nop_endif invalidate_icache_by_line x0, x1, x2, x3 ret -SYM_FUNC_END(invalidate_icache_range) +SYM_FUNC_END(icache_inval_pou) /* - * __flush_dcache_area(start, end) + * dcache_clean_inval_poc(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned and invalidated to the PoC. @@ -107,13 +107,13 @@ SYM_FUNC_END(invalidate_icache_range) * - start - virtual start address of region * - end - virtual end address of region */ -SYM_FUNC_START_PI(__flush_dcache_area) +SYM_FUNC_START_PI(dcache_clean_inval_poc) dcache_by_line_op civac, sy, x0, x1, x2, x3 ret -SYM_FUNC_END_PI(__flush_dcache_area) +SYM_FUNC_END_PI(dcache_clean_inval_poc) /* - * __clean_dcache_area_pou(start, end) + * dcache_clean_pou(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoU. @@ -121,17 +121,17 @@ SYM_FUNC_END_PI(__flush_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -SYM_FUNC_START(__clean_dcache_area_pou) +SYM_FUNC_START(dcache_clean_pou) alternative_if ARM64_HAS_CACHE_IDC dsb ishst ret alternative_else_nop_endif dcache_by_line_op cvau, ish, x0, x1, x2, x3 ret -SYM_FUNC_END(__clean_dcache_area_pou) +SYM_FUNC_END(dcache_clean_pou) /* - * __inval_dcache_area(start, end) + * dcache_inval_poc(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are invalidated. Any partial lines at the ends of the interval are @@ -141,7 +141,7 @@ SYM_FUNC_END(__clean_dcache_area_pou) * - end - kernel end address of region */ SYM_FUNC_START_LOCAL(__dma_inv_area) -SYM_FUNC_START_PI(__inval_dcache_area) +SYM_FUNC_START_PI(dcache_inval_poc) /* FALLTHROUGH */ /* @@ -166,11 +166,11 @@ SYM_FUNC_START_PI(__inval_dcache_area) b.lo 2b dsb sy ret -SYM_FUNC_END_PI(__inval_dcache_area) +SYM_FUNC_END_PI(dcache_inval_poc) SYM_FUNC_END(__dma_inv_area) /* - * __clean_dcache_area_poc(start, end) + * dcache_clean_poc(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoC. 
@@ -179,7 +179,7 @@ SYM_FUNC_END(__dma_inv_area) * - end - virtual end address of region */ SYM_FUNC_START_LOCAL(__dma_clean_area) -SYM_FUNC_START_PI(__clean_dcache_area_poc) +SYM_FUNC_START_PI(dcache_clean_poc) /* FALLTHROUGH */ /* @@ -189,11 +189,11 @@ SYM_FUNC_START_PI(__clean_dcache_area_poc) */ dcache_by_line_op cvac, sy, x0, x1, x2, x3 ret -SYM_FUNC_END_PI(__clean_dcache_area_poc) +SYM_FUNC_END_PI(dcache_clean_poc) SYM_FUNC_END(__dma_clean_area) /* - * __clean_dcache_area_pop(start, end) + * dcache_clean_pop(start, end) * * Ensure that any D-cache lines for the interval [start, end) * are cleaned to the PoP. @@ -201,13 +201,13 @@ SYM_FUNC_END(__dma_clean_area) * - start - virtual start address of region * - end - virtual end address of region */ -SYM_FUNC_START_PI(__clean_dcache_area_pop) +SYM_FUNC_START_PI(dcache_clean_pop) alternative_if_not ARM64_HAS_DCPOP - b __clean_dcache_area_poc + b dcache_clean_poc alternative_else_nop_endif dcache_by_line_op cvap, sy, x0, x1, x2, x3 ret -SYM_FUNC_END_PI(__clean_dcache_area_pop) +SYM_FUNC_END_PI(dcache_clean_pop) /* * __dma_flush_area(start, size) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index c4ca7e05fdb8..2aaf950b906c 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -17,14 +17,14 @@ void sync_icache_aliases(unsigned long start, unsigned long end) { if (icache_is_aliasing()) { - __clean_dcache_area_pou(start, end); - __flush_icache_all(); + dcache_clean_pou(start, end); + icache_inval_all_pou(); } else { /* * Don't issue kick_all_cpus_sync() after I-cache invalidation * for user mappings. */ - __flush_icache_range(start, end); + caches_clean_inval_pou(start, end); } } @@ -76,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page); /* * Additional functions defined in assembly. */ -EXPORT_SYMBOL(__flush_icache_range); +EXPORT_SYMBOL(caches_clean_inval_pou); #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size) { /* Ensure order against any prior non-cacheable writes */ dmb(osh); - __clean_dcache_area_pop((unsigned long)addr, (unsigned long)addr + size); + dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size); } EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); void arch_invalidate_pmem(void *addr, size_t size) { - __inval_dcache_area((unsigned long)addr, (unsigned long)addr + size); + dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size); } EXPORT_SYMBOL_GPL(arch_invalidate_pmem); #endif -- cgit v1.2.3-59-g8ed1b From e176e2677cccd458f99c69d16d27f86adcdd02e4 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 20 May 2021 12:50:27 +0100 Subject: arm64: assembler: add set_this_cpu_offset There should be no functional change as a result of this patch. 
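For illustration, a rough C-level analog of the new macro pair (the helper names below are ours, not from the patch; the real implementation is the assembly in assembler.h, which patches the register choice in with alternatives rather than testing has_vhe() at run time):

	#include <asm/sysreg.h>
	#include <asm/virt.h>

	/*
	 * The per-CPU offset lives in TPIDR_EL1, or in TPIDR_EL2 when
	 * running at EL2 with VHE; get/set_this_cpu_offset simply pick
	 * the right register.
	 */
	static inline unsigned long get_this_cpu_offset_sketch(void)
	{
		return has_vhe() ? read_sysreg(tpidr_el2) : read_sysreg(tpidr_el1);
	}

	static inline void set_this_cpu_offset_sketch(unsigned long off)
	{
		if (has_vhe())
			write_sysreg(off, tpidr_el2);
		else
			write_sysreg(off, tpidr_el1);
	}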
Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: James Morse Cc: Marc Zyngier Cc: Suzuki Poulose Cc: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20210520115031.18509-3-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 18 +++++++++++++----- arch/arm64/mm/proc.S | 12 ++---------- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 8418c1bd8f04..f0188903557f 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -232,15 +232,23 @@ lr .req x30 // link register * @dst: destination register */ #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__) - .macro this_cpu_offset, dst + .macro get_this_cpu_offset, dst mrs \dst, tpidr_el2 .endm #else - .macro this_cpu_offset, dst + .macro get_this_cpu_offset, dst alternative_if_not ARM64_HAS_VIRT_HOST_EXTN mrs \dst, tpidr_el1 alternative_else mrs \dst, tpidr_el2 +alternative_endif + .endm + + .macro set_this_cpu_offset, src +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN + msr tpidr_el1, \src +alternative_else + msr tpidr_el2, \src alternative_endif .endm #endif @@ -253,7 +261,7 @@ alternative_endif .macro adr_this_cpu, dst, sym, tmp adrp \tmp, \sym add \dst, \tmp, #:lo12:\sym - this_cpu_offset \tmp + get_this_cpu_offset \tmp add \dst, \dst, \tmp .endm @@ -264,7 +272,7 @@ alternative_endif */ .macro ldr_this_cpu dst, sym, tmp adr_l \dst, \sym - this_cpu_offset \tmp + get_this_cpu_offset \tmp ldr \dst, [\dst, \tmp] .endm @@ -745,7 +753,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU cbz \tmp, \lbl #endif adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING - this_cpu_offset \tmp2 + get_this_cpu_offset \tmp2 ldr w\tmp, [\tmp, \tmp2] cbnz w\tmp, \lbl // yield on pending softirq in task context .Lnoyield_\@: diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 97d7bcd8d4f2..bc555cd5e6b1 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -83,11 +83,7 @@ SYM_FUNC_START(cpu_do_suspend) mrs x9, mdscr_el1 mrs x10, oslsr_el1 mrs x11, sctlr_el1 -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - mrs x12, tpidr_el1 -alternative_else - mrs x12, tpidr_el2 -alternative_endif + get_this_cpu_offset x12 mrs x13, sp_el0 stp x2, x3, [x0] stp x4, x5, [x0, #16] @@ -145,11 +141,7 @@ SYM_FUNC_START(cpu_do_resume) msr mdscr_el1, x10 msr sctlr_el1, x12 -alternative_if_not ARM64_HAS_VIRT_HOST_EXTN - msr tpidr_el1, x13 -alternative_else - msr tpidr_el2, x13 -alternative_endif + set_this_cpu_offset x13 msr sp_el0, x14 /* * Restore oslsr_el1 by writing oslar_el1 -- cgit v1.2.3-59-g8ed1b From 5ae632ed356c2f2e42a3e7ea447e98a9e684539c Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 29 May 2021 19:15:10 +0800 Subject: arm64: mm: Use better bitmap_zalloc() Use the bitmap_zalloc() helper, rather than an open-coded kcalloc(), to allocate the bitmap.
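The equivalence relied on here, as a small sketch (the helper name is ours, not from the patch): bitmap_zalloc() pairs with bitmap_free() and hides the BITS_TO_LONGS() size calculation.

	#include <linux/bitmap.h>
	#include <linux/slab.h>

	static unsigned long *alloc_asid_bitmap(unsigned int nr_asids)
	{
		/*
		 * Open-coded form being replaced:
		 *   kcalloc(BITS_TO_LONGS(nr_asids), sizeof(unsigned long),
		 *           GFP_KERNEL);
		 */
		return bitmap_zalloc(nr_asids, GFP_KERNEL);
	}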
Cc: Catalin Marinas Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Kefeng Wang Link: https://lore.kernel.org/r/20210529111510.186355-1-wangkefeng.wang@huawei.com Signed-off-by: Will Deacon --- arch/arm64/mm/context.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index 001737a8f309..cd72576ae2b7 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -402,14 +402,12 @@ static int asids_init(void) { asid_bits = get_cpu_asid_bits(); atomic64_set(&asid_generation, ASID_FIRST_VERSION); - asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), - GFP_KERNEL); + asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); if (!asid_map) panic("Failed to allocate bitmap for %lu ASIDs\n", NUM_USER_ASIDS); - pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), - sizeof(*pinned_asid_map), GFP_KERNEL); + pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL); nr_pinned_asids = 0; /* -- cgit v1.2.3-59-g8ed1b From 58cc6b72a2127475296502fcb4d2b5006b7f4742 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 27 May 2021 12:03:17 +0100 Subject: arm64: mm: Remove unused support for Device-GRE memory type The Device-GRE memory type is unused, so remove it and reclaim a MAIR. Cc: Christoph Hellwig Acked-by: Catalin Marinas Suggested-by: Catalin Marinas Link: https://lore.kernel.org/r/20210505180228.GA3874@arm.com Link: https://lore.kernel.org/r/20210527110319.22157-2-will@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 1 - arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/mm/proc.S | 1 - arch/arm64/mm/ptdump.c | 4 ---- 4 files changed, 7 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 87b90dc27a43..1e025e3b655e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -138,7 +138,6 @@ #define MT_NORMAL_WT 3 #define MT_DEVICE_nGnRnE 4 #define MT_DEVICE_nGnRE 5 -#define MT_DEVICE_GRE 6 /* * Memory types for Stage-2 translation diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 65d15700a168..baeb33cd7685 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -703,7 +703,6 @@ /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) #define MAIR_ATTR_DEVICE_nGnRE UL(0x04) -#define MAIR_ATTR_DEVICE_GRE UL(0x0c) #define MAIR_ATTR_NORMAL_NC UL(0x44) #define MAIR_ATTR_NORMAL_WT UL(0xbb) #define MAIR_ATTR_NORMAL_TAGGED UL(0xf0) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 97d7bcd8d4f2..add026fcc88c 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -58,7 +58,6 @@ #define MAIR_EL1_SET \ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ - MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \ diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c index a1937dfff31c..1c403536c9bb 100644 --- a/arch/arm64/mm/ptdump.c +++ b/arch/arm64/mm/ptdump.c @@ -157,10 +157,6 @@ static const struct prot_bits pte_bits[] = { .mask = PTE_ATTRINDX_MASK, .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), .set = "DEVICE/nGnRE", - }, { - .mask = PTE_ATTRINDX_MASK, - .val = PTE_ATTRINDX(MT_DEVICE_GRE), - .set = 
"DEVICE/GRE", }, { .mask = PTE_ATTRINDX_MASK, .val = PTE_ATTRINDX(MT_NORMAL_NC), -- cgit v1.2.3-59-g8ed1b From 21cfe6edbadb703b674ae2ddf78862d00d24bfc5 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 27 May 2021 12:03:19 +0100 Subject: arm64: mm: Remove unused support for Normal-WT memory type The Normal-WT memory type is unused, so remove it and reclaim a MAIR. Cc: Christoph Hellwig Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210527110319.22157-4-will@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 5 ++--- arch/arm64/include/asm/pgtable-prot.h | 1 - arch/arm64/include/asm/sysreg.h | 1 - arch/arm64/mm/proc.S | 1 - 4 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1e025e3b655e..7b360960cc35 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -135,9 +135,8 @@ #define MT_NORMAL 0 #define MT_NORMAL_TAGGED 1 #define MT_NORMAL_NC 2 -#define MT_NORMAL_WT 3 -#define MT_DEVICE_nGnRnE 4 -#define MT_DEVICE_nGnRE 5 +#define MT_DEVICE_nGnRnE 3 +#define MT_DEVICE_nGnRE 4 /* * Memory types for Stage-2 translation diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 938092df76cf..7032f04c8ac6 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings; #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) -#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) #define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED)) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index baeb33cd7685..9ea84bcddf85 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -704,7 +704,6 @@ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) #define MAIR_ATTR_DEVICE_nGnRE UL(0x04) #define MAIR_ATTR_NORMAL_NC UL(0x44) -#define MAIR_ATTR_NORMAL_WT UL(0xbb) #define MAIR_ATTR_NORMAL_TAGGED UL(0xf0) #define MAIR_ATTR_NORMAL UL(0xff) #define MAIR_ATTR_MASK UL(0xff) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index add026fcc88c..6e640fa9788e 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -60,7 +60,6 @@ MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ - MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \ MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED)) #ifdef CONFIG_CPU_PM -- cgit v1.2.3-59-g8ed1b From fcf9dc02f83949b3261eefe03e7bb81c59bfaa9c Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 3 Jun 2021 20:02:39 +0800 Subject: arm64: mm: Add is_el1_data_abort() helper We alread have is_el1_instruction_abort(), add is_el1_data_abort() helper and use it. 
Signed-off-by: Kefeng Wang Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/20210603120239.169018-1-wangkefeng.wang@huawei.com Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 871c82ab0a30..5c855b2ab93b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -232,13 +232,17 @@ static bool is_el1_instruction_abort(unsigned int esr) return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; } +static bool is_el1_data_abort(unsigned int esr) +{ + return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR; +} + static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - unsigned int ec = ESR_ELx_EC(esr); unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; - if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR) + if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr)) return false; if (fsc_type == ESR_ELx_FSC_PERM) @@ -258,7 +262,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, unsigned long flags; u64 par, dfsc; - if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || + if (!is_el1_data_abort(esr) || (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) return false; @@ -346,10 +350,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr, static bool is_el1_mte_sync_tag_check_fault(unsigned int esr) { - unsigned int ec = ESR_ELx_EC(esr); unsigned int fsc = esr & ESR_ELx_FSC; - if (ec != ESR_ELx_EC_DABT_CUR) + if (!is_el1_data_abort(esr)) return false; if (fsc == ESR_ELx_FSC_MTE) -- cgit v1.2.3-59-g8ed1b From 013bb59dbb7cf876449df860946458a595a96d51 Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Wed, 2 Jun 2021 16:52:29 -0700 Subject: arm64: mte: handle tags zeroing at page allocation time Currently, on an anonymous page fault, the kernel allocates a zeroed page and maps it in user space. If the mapping is tagged (PROT_MTE), set_pte_at() additionally clears the tags. It is, however, more efficient to clear the tags at the same time as zeroing the data on allocation. To avoid clearing the tags on any page (which may not be mapped as tagged), only do this if the vma flags contain VM_MTE. This requires introducing a new GFP flag that is used to determine whether to clear the tags. The DC GZVA instruction with a 0 top byte (and 0 tag) requires top-byte-ignore. Set the TCR_EL1.{TBI1,TBID1} bits irrespective of whether KASAN_HW is enabled. 
Signed-off-by: Peter Collingbourne Co-developed-by: Catalin Marinas Signed-off-by: Catalin Marinas Link: https://linux-review.googlesource.com/id/Id46dc94e30fe11474f7e54f5d65e7658dbdddb26 Reviewed-by: Catalin Marinas Reviewed-by: Andrey Konovalov Link: https://lore.kernel.org/r/20210602235230.3928842-4-pcc@google.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/mte.h | 4 ++++ arch/arm64/include/asm/page.h | 8 ++++++-- arch/arm64/lib/mte.S | 20 ++++++++++++++++++++ arch/arm64/mm/fault.c | 26 ++++++++++++++++++++++++++ arch/arm64/mm/proc.S | 10 +++++++--- include/linux/gfp.h | 9 +++++++-- include/linux/highmem.h | 8 ++++++++ mm/kasan/hw_tags.c | 9 ++++++++- mm/page_alloc.c | 13 ++++++++++--- 9 files changed, 96 insertions(+), 11 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index bc88a1ced0d7..67bf259ae768 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -37,6 +37,7 @@ void mte_free_tag_storage(char *storage); /* track which pages have valid allocation tags */ #define PG_mte_tagged PG_arch_2 +void mte_zero_clear_page_tags(void *addr); void mte_sync_tags(pte_t *ptep, pte_t pte); void mte_copy_page_tags(void *kto, const void *kfrom); void mte_thread_init_user(void); @@ -53,6 +54,9 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request, /* unused if !CONFIG_ARM64_MTE, silence the compiler */ #define PG_mte_tagged 0 +static inline void mte_zero_clear_page_tags(void *addr) +{ +} static inline void mte_sync_tags(pte_t *ptep, pte_t pte) { } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index e1fc0f60e79f..ed1b9dcf12b2 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -13,6 +13,7 @@ #ifndef __ASSEMBLY__ #include /* for READ_IMPLIES_EXEC */ +#include /* for gfp_t */ #include struct page; @@ -28,10 +29,13 @@ void copy_user_highpage(struct page *to, struct page *from, void copy_highpage(struct page *to, struct page *from); #define __HAVE_ARCH_COPY_HIGHPAGE -#define alloc_zeroed_user_highpage_movable(vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr) +struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr); #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE +void tag_clear_highpage(struct page *to); +#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE + #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index 351537c12f36..e83643b3995f 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -36,6 +36,26 @@ SYM_FUNC_START(mte_clear_page_tags) ret SYM_FUNC_END(mte_clear_page_tags) +/* + * Zero the page and tags at the same time + * + * Parameters: + * x0 - address to the beginning of the page + */ +SYM_FUNC_START(mte_zero_clear_page_tags) + mrs x1, dczid_el0 + and w1, w1, #0xf + mov x2, #4 + lsl x1, x2, x1 + and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag + +1: dc gzva, x0 + add x0, x0, x1 + tst x0, #(PAGE_SIZE - 1) + b.ne 1b + ret +SYM_FUNC_END(mte_zero_clear_page_tags) + /* * Copy the tags from the source page to the destination one * x0 - address of the destination page diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 871c82ab0a30..180c0343d82a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -921,3 +921,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, 
debug_exception_exit(regs); } NOKPROBE_SYMBOL(do_debug_exception); + +/* + * Used during anonymous page fault handling. + */ +struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, + unsigned long vaddr) +{ + gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO; + + /* + * If the page is mapped with PROT_MTE, initialise the tags at the + * point of allocation and page zeroing as this is usually faster than + * separate DC ZVA and STGM. + */ + if (vma->vm_flags & VM_MTE) + flags |= __GFP_ZEROTAGS; + + return alloc_page_vma(flags, vma, vaddr); +} + +void tag_clear_highpage(struct page *page) +{ + mte_zero_clear_page_tags(page_address(page)); + page_kasan_tag_reset(page); + set_bit(PG_mte_tagged, &page->flags); +} diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 97d7bcd8d4f2..48fd1df3d05a 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -46,9 +46,13 @@ #endif #ifdef CONFIG_KASAN_HW_TAGS -#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1 +#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1 #else -#define TCR_KASAN_HW_FLAGS 0 +/* + * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on + * TBI being enabled at EL1. + */ +#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1 #endif /* @@ -464,7 +468,7 @@ SYM_FUNC_START(__cpu_setup) msr_s SYS_TFSRE0_EL1, xzr /* set the TCR_EL1 bits */ - mov_q x10, TCR_KASAN_HW_FLAGS + mov_q x10, TCR_MTE_FLAGS orr tcr, tcr, x10 1: #endif diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 11da8af06704..68ba237365dc 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -53,8 +53,9 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x100000u #define ___GFP_THISNODE 0x200000u #define ___GFP_ACCOUNT 0x400000u +#define ___GFP_ZEROTAGS 0x800000u #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x800000u +#define ___GFP_NOLOCKDEP 0x1000000u #else #define ___GFP_NOLOCKDEP 0 #endif @@ -229,16 +230,20 @@ struct vm_area_struct; * %__GFP_COMP address compound page metadata. * * %__GFP_ZERO returns a zeroed page on success. + * + * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if + * __GFP_ZERO is set. */ #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) #define __GFP_COMP ((__force gfp_t)___GFP_COMP) #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) /* Disable lockdep for GFP context tracking */ #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP)) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 54d0643b8fcf..8c6e8e996c87 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -185,6 +185,14 @@ static inline void clear_highpage(struct page *page) kunmap_atomic(kaddr); } +#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE + +static inline void tag_clear_highpage(struct page *page) +{ +} + +#endif + /* * If we pass in a base or tail page, we can zero up to PAGE_SIZE. * If we pass in a head page, we can zero up to the size of the compound page. 
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 9d0f6f934016..41fd5326ee0a 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -246,7 +246,14 @@ void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags) */ bool init = !want_init_on_free() && want_init_on_alloc(flags); - kasan_unpoison_pages(page, order, init); + if (flags & __GFP_ZEROTAGS) { + int i; + + for (i = 0; i != 1 << order; ++i) + tag_clear_highpage(page + i); + } else { + kasan_unpoison_pages(page, order, init); + } } void kasan_free_pages(struct page *page, unsigned int order) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4fddb7cac3c6..13937e793fda 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1219,10 +1219,16 @@ out: return ret; } -static void kernel_init_free_pages(struct page *page, int numpages) +static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags) { int i; + if (zero_tags) { + for (i = 0; i < numpages; i++) + tag_clear_highpage(page + i); + return; + } + /* s390's use of memset() could override KASAN redzones. */ kasan_disable_current(); for (i = 0; i < numpages; i++) { @@ -1314,7 +1320,7 @@ static __always_inline bool free_pages_prepare(struct page *page, bool init = want_init_on_free(); if (init) - kernel_init_free_pages(page, 1 << order); + kernel_init_free_pages(page, 1 << order, false); if (!skip_kasan_poison) kasan_poison_pages(page, order, init); } @@ -2349,7 +2355,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order, kasan_unpoison_pages(page, order, init); if (init) - kernel_init_free_pages(page, 1 << order); + kernel_init_free_pages(page, 1 << order, + gfp_flags & __GFP_ZEROTAGS); } set_page_owner(page, order, gfp_flags); -- cgit v1.2.3-59-g8ed1b From 064dbfb4169141943ec7d9dbfd02974dd008f2ce Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Jun 2021 10:46:11 +0100 Subject: arm64: entry: convert IRQ+FIQ handlers to C For various reasons we'd like to convert the bulk of arm64's exception triage logic to C. As a step towards that, this patch converts the EL1 and EL0 IRQ+FIQ triage logic to C. Separate C functions are added for the native and compat cases so that in subsequent patches we can handle native/compat differences in C. Since the triage functions can now call arm64_apply_bp_hardening() directly, the do_el0_irq_bp_hardening() wrapper function is removed. Since the user_exit_irqoff macro is now unused, it is removed. The user_enter_irqoff macro is still used by the ret_to_user code, and cannot be removed at this time. 
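One subtlety preserved by the conversion: the old assembly checked bit 55 of the ELR (tbz x22, #55, ...) before calling do_el0_irq_bp_hardening(), and the C version keeps the same test as regs->pc & BIT(55). A hedged sketch of what that bit means (the helper is illustrative, not from the patch):

	#include <linux/bits.h>

	/*
	 * Bit 55 of a virtual address selects the translation table base:
	 * clear for the TTBR0 (user) half, set for the TTBR1 (kernel)
	 * half. An EL0 PC with bit 55 set is therefore abnormal, and
	 * branch-predictor hardening is applied before handling the IRQ.
	 */
	static inline bool pc_in_ttbr1_half(unsigned long pc)
	{
		return !!(pc & BIT(55));
	}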
Signed-off-by: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Reviewed-by: Joey Gouly Cc: James Morse Cc: Will Deacon Link: https://lore.kernel.org/r/20210607094624.34689-8-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 8 ++- arch/arm64/include/asm/processor.h | 2 - arch/arm64/kernel/entry-common.c | 93 ++++++++++++++++++++++++++++++-- arch/arm64/kernel/entry.S | 108 +++++-------------------------------- arch/arm64/mm/fault.c | 7 --- 5 files changed, 110 insertions(+), 108 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index c24b69c0c589..4284ee57a9a5 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -32,14 +32,18 @@ static inline u32 disr_to_esr(u64 disr) } asmlinkage void el1_sync_handler(struct pt_regs *regs); +asmlinkage void el1_irq_handler(struct pt_regs *regs); +asmlinkage void el1_fiq_handler(struct pt_regs *regs); asmlinkage void el1_error_handler(struct pt_regs *regs); asmlinkage void el0_sync_handler(struct pt_regs *regs); +asmlinkage void el0_irq_handler(struct pt_regs *regs); +asmlinkage void el0_fiq_handler(struct pt_regs *regs); asmlinkage void el0_error_handler(struct pt_regs *regs); asmlinkage void el0_sync_compat_handler(struct pt_regs *regs); +asmlinkage void el0_irq_compat_handler(struct pt_regs *regs); +asmlinkage void el0_fiq_compat_handler(struct pt_regs *regs); asmlinkage void el0_error_compat_handler(struct pt_regs *regs); -asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs); -asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs); asmlinkage void call_on_irq_stack(struct pt_regs *regs, void (*func)(struct pt_regs *)); asmlinkage void enter_from_user_mode(void); diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 9df3feeee890..2f21c76324bb 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -257,8 +257,6 @@ void set_task_sctlr_el1(u64 sctlr); extern struct task_struct *cpu_switch_to(struct task_struct *prev, struct task_struct *next); -asmlinkage void arm64_preempt_schedule_irq(void); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 08d17eb0ce13..ae1b6d7c00e1 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include /* @@ -101,7 +103,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs) __nmi_exit(); } -asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) +static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) arm64_enter_nmi(regs); @@ -109,7 +111,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs) enter_from_kernel_mode(regs); } -asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) +static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) arm64_exit_nmi(regs); @@ -117,7 +119,7 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs) exit_to_kernel_mode(regs); } -asmlinkage void __sched arm64_preempt_schedule_irq(void) +static void __sched arm64_preempt_schedule_irq(void) { lockdep_assert_irqs_disabled(); @@ -142,6 +144,18 @@ asmlinkage void __sched 
arm64_preempt_schedule_irq(void) preempt_schedule_irq(); } +static void do_interrupt_handler(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + if (on_thread_stack()) + call_on_irq_stack(regs, handler); + else + handler(regs); +} + +extern void (*handle_arch_irq)(struct pt_regs *); +extern void (*handle_arch_fiq)(struct pt_regs *); + #ifdef CONFIG_ARM64_ERRATUM_1463225 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); @@ -308,6 +322,36 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs) } } +static void noinstr el1_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + enter_el1_irq_or_nmi(regs); + do_interrupt_handler(regs, handler); + + /* + * Note: thread_info::preempt_count includes both thread_info::count + * and thread_info::need_resched, and is not equivalent to + * preempt_count(). + */ + if (IS_ENABLED(CONFIG_PREEMPTION) && + READ_ONCE(current_thread_info()->preempt_count) == 0) + arm64_preempt_schedule_irq(); + + exit_el1_irq_or_nmi(regs); +} + +asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el1_fiq_handler(struct pt_regs *regs) +{ + el1_interrupt(regs, handle_arch_fiq); +} + asmlinkage void noinstr el1_error_handler(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -507,6 +551,39 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs) } } +static void noinstr el0_interrupt(struct pt_regs *regs, + void (*handler)(struct pt_regs *)) +{ + enter_from_user_mode(); + + write_sysreg(DAIF_PROCCTX_NOIRQ, daif); + + if (regs->pc & BIT(55)) + arm64_apply_bp_hardening(); + + do_interrupt_handler(regs, handler); +} + +static void noinstr __el0_irq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_irq); +} + +asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} + +static void noinstr __el0_fiq_handler_common(struct pt_regs *regs) +{ + el0_interrupt(regs, handle_arch_fiq); +} + +asmlinkage void noinstr el0_fiq_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + static void __el0_error_handler_common(struct pt_regs *regs) { unsigned long esr = read_sysreg(esr_el1); @@ -583,6 +660,16 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs) } } +asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs) +{ + __el0_irq_handler_common(regs); +} + +asmlinkage void noinstr el0_fiq_compat_handler(struct pt_regs *regs) +{ + __el0_fiq_handler_common(regs); +} + asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs) { __el0_error_handler_common(regs); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 8ca74ce115ee..8eb3a0a51413 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -33,12 +33,6 @@ * Context tracking and irqflag tracing need to instrument transitions between * user and kernel mode. 
*/ - .macro user_exit_irqoff -#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) - bl enter_from_user_mode -#endif - .endm - .macro user_enter_irqoff #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS) bl exit_to_user_mode @@ -486,63 +480,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0) SYM_CODE_END(__swpan_exit_el0) #endif - .macro irq_stack_entry - mov x19, sp // preserve the original sp -#ifdef CONFIG_SHADOW_CALL_STACK - mov x24, scs_sp // preserve the original shadow stack -#endif - - /* - * Compare sp with the base of the task stack. - * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack, - * and should switch to the irq stack. - */ - ldr x25, [tsk, TSK_STACK] - eor x25, x25, x19 - and x25, x25, #~(THREAD_SIZE - 1) - cbnz x25, 9998f - - ldr_this_cpu x25, irq_stack_ptr, x26 - mov x26, #IRQ_STACK_SIZE - add x26, x25, x26 - - /* switch to the irq stack */ - mov sp, x26 - -#ifdef CONFIG_SHADOW_CALL_STACK - /* also switch to the irq shadow stack */ - ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26 -#endif - -9998: - .endm - - /* - * The callee-saved regs (x19-x29) should be preserved between - * irq_stack_entry and irq_stack_exit, but note that kernel_entry - * uses x20-x23 to store data for later use. - */ - .macro irq_stack_exit - mov sp, x19 -#ifdef CONFIG_SHADOW_CALL_STACK - mov scs_sp, x24 -#endif - .endm - /* GPRs used by entry code */ tsk .req x28 // current thread_info /* * Interrupt handling. */ - .macro irq_handler, handler:req - ldr_l x1, \handler - mov x0, sp - irq_stack_entry - blr x1 - irq_stack_exit - .endm - .macro gic_prio_kentry_setup, tmp:req #ifdef CONFIG_ARM64_PSEUDO_NMI alternative_if ARM64_HAS_IRQ_PRIO_MASKING @@ -552,35 +495,6 @@ tsk .req x28 // current thread_info #endif .endm - .macro el1_interrupt_handler, handler:req - enable_da - - mov x0, sp - bl enter_el1_irq_or_nmi - - irq_handler \handler - -#ifdef CONFIG_PREEMPTION - ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count - cbnz x24, 1f // preempt count != 0 - bl arm64_preempt_schedule_irq // irq en/disable is done inside -1: -#endif - - mov x0, sp - bl exit_el1_irq_or_nmi - .endm - - .macro el0_interrupt_handler, handler:req - user_exit_irqoff - enable_da - - tbz x22, #55, 1f - bl do_el0_irq_bp_hardening -1: - irq_handler \handler - .endm - .text /* @@ -704,13 +618,15 @@ SYM_CODE_END(el1_sync) .align 6 SYM_CODE_START_LOCAL_NOALIGN(el1_irq) kernel_entry 1 - el1_interrupt_handler handle_arch_irq + mov x0, sp + bl el1_irq_handler kernel_exit 1 SYM_CODE_END(el1_irq) SYM_CODE_START_LOCAL_NOALIGN(el1_fiq) kernel_entry 1 - el1_interrupt_handler handle_arch_fiq + mov x0, sp + bl el1_fiq_handler kernel_exit 1 SYM_CODE_END(el1_fiq) @@ -737,12 +653,16 @@ SYM_CODE_END(el0_sync_compat) .align 6 SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat) kernel_entry 0, 32 - b el0_irq_naked + mov x0, sp + bl el0_irq_compat_handler + b ret_to_user SYM_CODE_END(el0_irq_compat) SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat) kernel_entry 0, 32 - b el0_fiq_naked + mov x0, sp + bl el0_fiq_compat_handler + b ret_to_user SYM_CODE_END(el0_fiq_compat) SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat) @@ -756,15 +676,15 @@ SYM_CODE_END(el0_error_compat) .align 6 SYM_CODE_START_LOCAL_NOALIGN(el0_irq) kernel_entry 0 -el0_irq_naked: - el0_interrupt_handler handle_arch_irq + mov x0, sp + bl el0_irq_handler b ret_to_user SYM_CODE_END(el0_irq) SYM_CODE_START_LOCAL_NOALIGN(el0_fiq) kernel_entry 0 -el0_fiq_naked: - el0_interrupt_handler handle_arch_fiq + mov x0, sp + bl el0_fiq_handler b ret_to_user 
SYM_CODE_END(el0_fiq) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 871c82ab0a30..3b4a4adfddfd 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -836,13 +836,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs) } NOKPROBE_SYMBOL(do_mem_abort); -void do_el0_irq_bp_hardening(void) -{ - /* PC has already been checked in entry.S */ - arm64_apply_bp_hardening(); -} -NOKPROBE_SYMBOL(do_el0_irq_bp_hardening); - void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN, -- cgit v1.2.3-59-g8ed1b From e0e3903f83d5e41ab7e7737ebe41ef36f578dc0a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 8 Jun 2021 13:37:42 +0100 Subject: arm64: mm: decode xFSC in mem_abort_decode() It would be helpful if mem_abort_decode() could decode the DFSC/IFSC, as this can make it easier to identify common bugs (e.g. accesses which trigger alignment faults) without having to manually decode the xFSC value. Decode the xFSC in mem_abort_decode(). Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Robin Murphy Cc: Will Deacon Link: https://lore.kernel.org/r/20210608123742.11921-1-mark.rutland@arm.com Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 5c855b2ab93b..6786cf152666 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr) pr_alert(" EA = %lu, S1PTW = %lu\n", (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); + pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC), + esr_to_fault_info(esr)->name); if (esr_is_data_abort(esr)) data_abort_decode(esr); -- cgit v1.2.3-59-g8ed1b From 4aaa87ab3d2de485d8aae7a88cc9cb02dcd2c450 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Mon, 14 Jun 2021 13:48:26 +0530 Subject: arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK] SECTION_[SHIFT|SIZE|MASK] are essentially PMD_[SHIFT|SIZE|MASK]. But these create confusion, being similar to the generic sparsemem memory sections, which are derived from SECTION_SIZE_BITS. Section references have always implied PMD level block mapping. Instead, just use the PMD level macros throughout, which makes this explicit and also removes the confusion with sparsemem memory sections.
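The identity that makes this a mechanical change, sketched for the common 4K-page configuration (values per pgtable-hwdef.h; the helper below is illustrative, not part of the patch):

	#include <asm/pgtable-hwdef.h>

	/*
	 * Before this patch, with 4K pages:
	 *   SECTION_SHIFT == PMD_SHIFT == 21  (2MiB block mappings)
	 *   SECTION_SIZE  == PMD_SIZE
	 *   SECTION_MASK  == PMD_MASK
	 * so tests like the one in init_pmd() are unchanged in behaviour.
	 */
	static inline bool pmd_block_aligned(unsigned long addr)
	{
		return (addr & ~PMD_MASK) == 0;
	}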
Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Anshuman Khandual Reviewed-by: Gavin Shan Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/1623658706-7182-1-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon --- arch/arm64/include/asm/kernel-pgtable.h | 4 ++-- arch/arm64/include/asm/pgtable-hwdef.h | 7 ------- arch/arm64/mm/mmu.c | 2 +- 3 files changed, 3 insertions(+), 10 deletions(-) (limited to 'arch/arm64/mm') diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index c5f18f2408b5..1260187adb31 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -91,8 +91,8 @@ /* Initial memory map size */ #if ARM64_SWAPPER_USES_SECTION_MAPS -#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT -#define SWAPPER_BLOCK_SIZE SECTION_SIZE +#define SWAPPER_BLOCK_SHIFT PMD_SHIFT +#define SWAPPER_BLOCK_SIZE PMD_SIZE #define SWAPPER_TABLE_SHIFT PUD_SHIFT #else #define SWAPPER_BLOCK_SHIFT PAGE_SHIFT diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index b82575a33f8b..40085e53f573 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -71,13 +71,6 @@ #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT)) -/* - * Section address mask and size definitions. - */ -#define SECTION_SHIFT PMD_SHIFT -#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) - /* * Contiguous page definitions. */ diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3d34cd127f6b..5b75f7eefb72 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -228,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, next = pmd_addr_end(addr, end); /* try section mapping first */ - if (((addr | next | phys) & ~SECTION_MASK) == 0 && + if (((addr | next | phys) & ~PMD_MASK) == 0 && (flags & NO_BLOCK_MAPPINGS) == 0) { pmd_set_huge(pmdp, phys, prot); -- cgit v1.2.3-59-g8ed1b From 84c5e23edecd7013ceaed8460deed5c33842cb8d Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 14 Jun 2021 20:27:01 +0800 Subject: arm64: mm: Pass original fault address to handle_mm_fault() Currently, the lower bits of the fault address are cleared before it's passed to handle_mm_fault(). This is unnecessary, since the generic code has done the same thing since commit 1a29d85eb0f19 ("mm: use vmf->address instead of of vmf->virtual_address"). Pass the original fault address to handle_mm_fault() in case the generic code needs to know the exact fault address.
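For context, a sketch of where the generic code does the masking today (fields as in mm/memory.c of this era; the wrapper function is ours and shown only for illustration):

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	static void build_vmf_sketch(struct vm_area_struct *vma,
				     unsigned long address, unsigned int flags)
	{
		/*
		 * __handle_mm_fault() builds its struct vm_fault along
		 * these lines, page-aligning the address itself, which
		 * is why callers may pass the exact faulting address:
		 */
		struct vm_fault vmf = {
			.vma = vma,
			.address = address & PAGE_MASK,
			.flags = flags,
			.pgoff = linear_page_index(vma, address),
		};

		(void)vmf;
	}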
Signed-off-by: Gavin Shan
Acked-by: Catalin Marinas
Reviewed-by: Anshuman Khandual
Link: https://lore.kernel.org/r/20210614122701.100515-1-gshan@redhat.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/fault.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6786cf152666..bd9a0bb5fb56 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -509,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!(vma->vm_flags & vm_flags))
 		return VM_FAULT_BADACCESS;
-	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
+	return handle_mm_fault(vma, addr, mm_flags, regs);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
-- cgit v1.2.3-59-g8ed1b

From 2062d44da3499eed3c7d005df8f0b54d300ac0b5 Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Fri, 18 Jun 2021 10:17:02 +0530
Subject: arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS

ARM64_SWAPPER_USES_SECTION_MAPS implies that PMD level huge page mappings
are used for the swapper, idmap and vmemmap. Let's make the name explicitly
about PMD, removing any possible confusion with generic memory sections,
and also a bit more generic, as it applies to the idmap and vmemmap
mappings as well. Hence rename it to ARM64_KERNEL_USES_PMD_MAPS instead.

Cc: Catalin Marinas
Cc: Will Deacon
Cc: Mark Rutland
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual
Acked-by: Catalin Marinas
Link: https://lore.kernel.org/r/1623991622-24294-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/kernel-pgtable.h | 10 +++++-----
 arch/arm64/mm/mmu.c                     |  6 +++---
 2 files changed, 8 insertions(+), 8 deletions(-)
(limited to 'arch/arm64/mm')

diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 1260187adb31..3512184cfec1 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -18,9 +18,9 @@
  * 64K (section size = 512M).
  */
 #ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_SWAPPER_USES_SECTION_MAPS 1
+#define ARM64_KERNEL_USES_PMD_MAPS	1
 #else
-#define ARM64_SWAPPER_USES_SECTION_MAPS 0
+#define ARM64_KERNEL_USES_PMD_MAPS	0
 #endif
 
 /*
@@ -33,7 +33,7 @@
  * VA range, so pages required to map highest possible PA are reserved in all
 * cases.
 */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
 #define IDMAP_PGTABLE_LEVELS	(ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
 #else
@@ -90,7 +90,7 @@
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
 /* Initial memory map size */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
 #define SWAPPER_BLOCK_SIZE	PMD_SIZE
 #define SWAPPER_TABLE_SHIFT	PUD_SHIFT
@@ -106,7 +106,7 @@
 #define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_MM_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
 #else
 #define SWAPPER_MM_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5b75f7eefb72..e04e4b6bdf16 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1113,14 +1113,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 }
 #endif
 
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
+#if !ARM64_KERNEL_USES_PMD_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 	return vmemmap_populate_basepages(start, end, node, altmap);
 }
-#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#else	/* !ARM64_KERNEL_USES_PMD_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
@@ -1165,7 +1165,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return 0;
 }
-#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#endif	/* !ARM64_KERNEL_USES_PMD_MAPS */
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 void vmemmap_free(unsigned long start, unsigned long end,
-- cgit v1.2.3-59-g8ed1b
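For illustration only (not from the kernel tree): a stand-alone sketch of
how the renamed switch selects the initial mapping granule, assuming
CONFIG_ARM64_4K_PAGES as in the hunks above, with the shift values
hard-coded.

	#include <stdio.h>

	/* Mirrors the CONFIG_ARM64_4K_PAGES case from kernel-pgtable.h. */
	#define ARM64_KERNEL_USES_PMD_MAPS	1

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21

	#if ARM64_KERNEL_USES_PMD_MAPS
	#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT	/* 2M block mappings */
	#else
	#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT	/* 4K page mappings */
	#endif

	int main(void)
	{
		/* Prints 2048 KiB, i.e. 2M blocks for the initial memory map. */
		printf("swapper block size: %llu KiB\n",
		       (1ULL << SWAPPER_BLOCK_SHIFT) / 1024);
		return 0;
	}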