From 6aae3425aa9ca776e8201a93494a4a482353d2c3 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Sat, 30 Nov 2019 17:51:06 -0800
Subject: ARC: mm: remove __ARCH_USE_5LEVEL_HACK

Patch series "elide extraneous generated code for folded p4d/pud/pmd", v3.

This series came out of a seemingly benign excursion into understanding/
removing __ARCH_USE_5LEVEL_HACK from the ARC port, which showed some
extraneous code being generated despite folded p4d/pud/pmd:

| bloat-o-meter2 vmlinux-[AB]*
| add/remove: 0/0 grow/shrink: 3/0 up/down: 130/0 (130)
| function          old     new   delta
| free_pgd_range    548     660    +112
| p4d_clear_bad       2      20     +18

The patches here address that:

| bloat-o-meter2 vmlinux-[BF]*
| add/remove: 0/2 grow/shrink: 0/1 up/down: 0/-386 (-386)
| function          old     new   delta
| pud_clear_bad      20       -     -20
| p4d_clear_bad      20       -     -20
| free_pgd_range    660     314    -346

The code savings are not a whole lot, but still worthwhile IMHO.

This patch (of 5):

With the paging code made 5-level compliant, this is no longer needed;
ARC has a software page walker with 2 lookup levels (pgd -> pte).

This was expected to be a non-functional change, but it ended up with
slight code bloat due to needless inclusion of the p*d_free_tlb() macros,
which will be addressed in further patches.

| bloat-o-meter2 vmlinux-[AB]*
| add/remove: 0/0 grow/shrink: 2/0 up/down: 128/0 (128)
| function          old     new   delta
| free_pgd_range    546     656    +110
| p4d_clear_bad       2      20     +18
| Total: Before=4137148, After=4137276, chg 0.000000%

Link: http://lkml.kernel.org/r/20191016162400.14796-2-vgupta@synopsys.com
Signed-off-by: Vineet Gupta
Acked-by: Kirill A. Shutemov
Cc: "Aneesh Kumar K . V"
Cc: Arnd Bergmann
Cc: Nick Piggin
Cc: Peter Zijlstra
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arc/include/asm/pgtable.h |  1 -
 arch/arc/mm/fault.c            | 10 ++++++++--
 arch/arc/mm/highmem.c          |  4 +++-
 3 files changed, 11 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 7addd0301c51..b917b596f7fb 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -33,7 +33,6 @@
 #define _ASM_ARC_PGTABLE_H

 #include
-#define __ARCH_USE_5LEVEL_HACK
 #include
 #include
 #include        /* to propagate CONFIG_ARC_MMU_VER */

diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 3861543b66a0..fb86bc3e9b35 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -30,6 +30,7 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
         * with the 'reference' page table.
         */
        pgd_t *pgd, *pgd_k;
+       p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

@@ -39,8 +40,13 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
        if (!pgd_present(*pgd_k))
                goto bad_area;

-       pud = pud_offset(pgd, address);
-       pud_k = pud_offset(pgd_k, address);
+       p4d = p4d_offset(pgd, address);
+       p4d_k = p4d_offset(pgd_k, address);
+       if (!p4d_present(*p4d_k))
+               goto bad_area;
+
+       pud = pud_offset(p4d, address);
+       pud_k = pud_offset(p4d_k, address);
        if (!pud_present(*pud_k))
                goto bad_area;

diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index a4856bfaedf3..fc8849e4f72e 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -111,12 +111,14 @@ EXPORT_SYMBOL(__kunmap_atomic);
 static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
        pgd_t *pgd_k;
+       p4d_t *p4d_k;
        pud_t *pud_k;
        pmd_t *pmd_k;
        pte_t *pte_k;

        pgd_k = pgd_offset_k(kvaddr);
-       pud_k = pud_offset(pgd_k, kvaddr);
+       p4d_k = p4d_offset(pgd_k, kvaddr);
+       pud_k = pud_offset(p4d_k, kvaddr);
        pmd_k = pmd_offset(pud_k, kvaddr);

        pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-- cgit v1.2.3-59-g8ed1b
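With the hack gone, the fault handler above follows the generic 5-level walk, and
the folded p4d/pud/pmd accessors are expected to compile away. A minimal sketch of
that walk (illustrative only -- the function name is made up and error handling is
reduced to NULL returns; it is not code from this patch):

static pte_t *walk_kernel_ptes(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);        /* top level: kernel pgd */
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);            /* folded on ARC: same entry as pgd */
        if (p4d_none(*p4d))
                return NULL;
        pud = pud_offset(p4d, addr);            /* folded: same entry as p4d */
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, addr);            /* folded: same entry as pud */
        if (pmd_none(*pmd))
                return NULL;
        return pte_offset_kernel(pmd, addr);    /* second (leaf) level on ARC */
}

When a level is folded, each *_offset() essentially returns its argument viewed as
the next level down, which is what lets the rest of the series also elide the
p*d_clear_bad()/p*d_free_tlb() code without changing behaviour.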
From eafb149ed73a8bb8359c0ce027b98acd4e95b070 Mon Sep 17 00:00:00 2001
From: Daniel Axtens
Date: Sat, 30 Nov 2019 17:54:57 -0800
Subject: fork: support VMAP_STACK with KASAN_VMALLOC

Supporting VMAP_STACK with KASAN_VMALLOC is straightforward:

 - clear the shadow region of vmapped stacks when swapping them in
 - tweak Kconfig to allow VMAP_STACK to be turned on with KASAN

Link: http://lkml.kernel.org/r/20191031093909.9228-4-dja@axtens.net
Signed-off-by: Daniel Axtens
Reviewed-by: Dmitry Vyukov
Reviewed-by: Andrey Ryabinin
Cc: Alexander Potapenko
Cc: Christophe Leroy
Cc: Mark Rutland
Cc: Vasily Gorbik
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/Kconfig  | 9 +++++----
 kernel/fork.c | 4 ++++
 2 files changed, 9 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/Kconfig b/arch/Kconfig
index 17c42bc36321..ec07f9ba1152 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -843,16 +843,17 @@ config HAVE_ARCH_VMAP_STACK
 config VMAP_STACK
        default y
        bool "Use a virtually-mapped stack"
-       depends on HAVE_ARCH_VMAP_STACK && !KASAN
+       depends on HAVE_ARCH_VMAP_STACK
+       depends on !KASAN || KASAN_VMALLOC
        ---help---
          Enable this if you want the use virtually-mapped kernel stacks
          with guard pages.  This causes kernel stack overflows to be
          caught immediately rather than causing difficult-to-diagnose
          corruption.

-         This is presently incompatible with KASAN because KASAN expects
-         the stack to map directly to the KASAN shadow map using a formula
-         that is incorrect if the stack is in vmalloc space.
+         To use this with KASAN, the architecture must support backing
+         virtual mappings with real shadow memory, and KASAN_VMALLOC must
+         be enabled.

 config ARCH_OPTIONAL_KERNEL_RWX
        def_bool n

diff --git a/kernel/fork.c b/kernel/fork.c
index 0f0bac8318dd..21c6c1e29b98 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -93,6 +93,7 @@
 #include
 #include
 #include
+#include

 #include
 #include

@@ -223,6 +224,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
                if (!s)
                        continue;

+               /* Clear the KASAN shadow of the stack. */
+               kasan_unpoison_shadow(s->addr, THREAD_SIZE);
+
                /* Clear stale pointers from reused stack. */
                memset(s->addr, 0, THREAD_SIZE);
-- cgit v1.2.3-59-g8ed1b
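The ordering in the fork.c hunk is the interesting part: the cached stack's shadow
is cleared before anything touches the stack itself. A rough illustration of the
reuse path under CONFIG_VMAP_STACK (the helper name is made up, and the reasoning
in the comments is an interpretation of the change, not text from the patch):

#include <linux/kasan.h>
#include <linux/sched/task_stack.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/*
 * Reuse a cached vmapped stack.  The shadow is unpoisoned first so that
 * the (KASAN-instrumented) memset() below, and the new task's own stack
 * accesses, do not trip over poison possibly left behind by the stack's
 * previous owner.
 */
static void *reuse_cached_stack(struct vm_struct *s)
{
        kasan_unpoison_shadow(s->addr, THREAD_SIZE);    /* make the whole range addressable */
        memset(s->addr, 0, THREAD_SIZE);                /* only now scrub stale pointers */
        return s->addr;
}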
From 0609ae011deb41c9629b7f5fd626dfa1ac9d16b0 Mon Sep 17 00:00:00 2001
From: Daniel Axtens
Date: Sat, 30 Nov 2019 17:55:00 -0800
Subject: x86/kasan: support KASAN_VMALLOC

In the case where KASAN directly allocates memory to back vmalloc space,
don't map the early shadow page over it.

We prepopulate pgds/p4ds for the range that would otherwise be empty.
This is required to get it synced to hardware on boot, allowing the lower
levels of the page tables to be filled dynamically.

Link: http://lkml.kernel.org/r/20191031093909.9228-5-dja@axtens.net
Signed-off-by: Daniel Axtens
Acked-by: Dmitry Vyukov
Reviewed-by: Andrey Ryabinin
Cc: Alexander Potapenko
Cc: Christophe Leroy
Cc: Mark Rutland
Cc: Vasily Gorbik
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/x86/Kconfig            |  1 +
 arch/x86/mm/kasan_init_64.c | 61 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 62 insertions(+)

(limited to 'arch')

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0cb1756223be..5e8949953660 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -134,6 +134,7 @@ config X86
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN                  if X86_64
+       select HAVE_ARCH_KASAN_VMALLOC          if X86_64
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS          if MMU
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if MMU && COMPAT

diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 296da58f3013..cf5bc37c90ac 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -245,6 +245,49 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
        } while (pgd++, addr = next, addr != end);
 }

+static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
+                                              unsigned long addr,
+                                              unsigned long end)
+{
+       p4d_t *p4d;
+       unsigned long next;
+       void *p;
+
+       p4d = p4d_offset(pgd, addr);
+       do {
+               next = p4d_addr_end(addr, end);
+
+               if (p4d_none(*p4d)) {
+                       p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+                       p4d_populate(&init_mm, p4d, p);
+               }
+       } while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_shallow_populate_pgds(void *start, void *end)
+{
+       unsigned long addr, next;
+       pgd_t *pgd;
+       void *p;
+
+       addr = (unsigned long)start;
+       pgd = pgd_offset_k(addr);
+       do {
+               next = pgd_addr_end(addr, (unsigned long)end);
+
+               if (pgd_none(*pgd)) {
+                       p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+                       pgd_populate(&init_mm, pgd, p);
+               }
+
+               /*
+                * we need to populate p4ds to be synced when running in
+                * four level mode - see sync_global_pgds_l4()
+                */
+               kasan_shallow_populate_p4ds(pgd, addr, next);
+       } while (pgd++, addr = next, addr != (unsigned long)end);
+}
+
 #ifdef CONFIG_KASAN_INLINE
 static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,

@@ -354,6 +397,24 @@ void __init kasan_init(void)

        kasan_populate_early_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+               kasan_mem_to_shadow((void *)VMALLOC_START));
+
+       /*
+        * If we're in full vmalloc mode, don't back vmalloc space with early
+        * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
+        * the global table and we can populate the lower levels on demand.
+        */
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+               kasan_shallow_populate_pgds(
+                       kasan_mem_to_shadow((void *)VMALLOC_START),
+                       kasan_mem_to_shadow((void *)VMALLOC_END));
+       else
+               kasan_populate_early_shadow(
+                       kasan_mem_to_shadow((void *)VMALLOC_START),
+                       kasan_mem_to_shadow((void *)VMALLOC_END));
+
+       kasan_populate_early_shadow(
+               kasan_mem_to_shadow((void *)VMALLOC_END + 1),
                shadow_cpu_entry_begin);

        kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
-- cgit v1.2.3-59-g8ed1b
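The VMALLOC_START/VMALLOC_END bounds above are turned into shadow addresses by
kasan_mem_to_shadow(). As background for why vmalloc space needs its own shadow
backing at all, generic KASAN maps every 8 bytes of address space to one shadow
byte; a sketch of that translation (KASAN_SHADOW_OFFSET is the per-architecture
constant, assumed to be provided elsewhere):

/*
 * Generic KASAN address -> shadow translation: one shadow byte tracks
 * KASAN_SHADOW_SCALE_SIZE (8) bytes of real memory, offset into the
 * architecture's shadow region.
 */
#define KASAN_SHADOW_SCALE_SHIFT        3

static inline void *mem_to_shadow(const void *addr)
{
        return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                + KASAN_SHADOW_OFFSET;
}

A vmapped stack therefore needs real shadow pages behind the vmalloc portion of
this range, which is what KASAN_VMALLOC provides and what the shallow pgd/p4d
population above prepares the top-level tables for.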
From 997cdcb068eb58d37f9f9b1d219368000066d272 Mon Sep 17 00:00:00 2001
From: Mike Kravetz
Date: Sat, 30 Nov 2019 17:56:37 -0800
Subject: powerpc/mm: remove pmd_huge/pud_huge stubs and include hugetlb.h

Patch series "hugetlbfs: convert macros to static inline, fix sparse warning".

The definition for huge_pte_offset() in <linux/hugetlb.h> causes a sparse
warning in the !CONFIG_HUGETLB_PAGE case.  Fix this, as well as converting
all macros in this block of definitions to static inlines for better type
checking.

When making the above changes, build errors were found in powerpc due to
duplicate definitions.  A separate powerpc-specific patch is included as a
prerequisite to remove those definitions and get them from
<linux/hugetlb.h>.

This patch (of 2):

This removes the powerpc-specific stubs created by commit aad71e3928be
("powerpc/mm: Fix build break with RADIX=y & HUGETLBFS=n") used when
!CONFIG_HUGETLB_PAGE.  Instead, it addresses the build break by getting
the definitions from <linux/hugetlb.h>.  This allows the macros there to
be replaced with static inlines.

Link: http://lkml.kernel.org/r/20191112194558.139389-2-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz
Acked-by: Michael Ellerman
Cc: Ben Dooks
Cc: Jason Gunthorpe
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/powerpc/include/asm/book3s/64/pgtable-4k.h  | 3 ---
 arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 3 ---
 arch/powerpc/mm/book3s64/radix_pgtable.c         | 1 +
 3 files changed, 1 insertion(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index a069dfcac9a9..4e697bc2f4cd 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -70,9 +70,6 @@ static inline int get_hugepd_cache_index(int index)
        /* should not reach */
 }

-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
 #endif /* CONFIG_HUGETLB_PAGE */

 #endif /* __ASSEMBLY__ */

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index e3d4dd4ae2fa..34d1018896b3 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -59,9 +59,6 @@ static inline int get_hugepd_cache_index(int index)
        BUG();
 }

-#else /* !CONFIG_HUGETLB_PAGE */
-static inline int pmd_huge(pmd_t pmd) { return 0; }
-static inline int pud_huge(pud_t pud) { return 0; }
 #endif /* CONFIG_HUGETLB_PAGE */

 static inline int remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,

diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 6ee17d09649c..974109bb85db 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <linux/hugetlb.h>
 #include
 #include
-- cgit v1.2.3-59-g8ed1b
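The second patch in this series (not shown here) performs the macro-to-static-inline
conversion that motivates removing these powerpc stubs. A generic illustration of why
that conversion helps sparse and type checking -- not the actual hugetlb.h hunk, and
the #undef is only there so the before/after forms can coexist in one sketch:

#include <linux/mm.h>

/*
 * Before: a bare macro stub.  There is no argument or return-type
 * checking, and sparse warns about using a plain integer where a
 * pte_t * is expected.
 */
#define huge_pte_offset(mm, addr, sz)   0

/*
 * After: the same stub as a static inline.  Callers keep full type
 * checking even when hugetlb support is compiled out, and returning
 * NULL satisfies sparse.
 */
#undef huge_pte_offset
static inline pte_t *huge_pte_offset(struct mm_struct *mm,
                                     unsigned long addr, unsigned long sz)
{
        return NULL;
}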