From 0a2c13d9cd76c84f2520f573ff83f777eb7464aa Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 12 Jul 2017 14:33:01 -0700 Subject: include/linux/dcache.h: use unsigned chars in struct name_snapshot "kernel.h: handle pointers to arrays better in container_of()" triggers: In file included from include/uapi/linux/stddef.h:1:0, from include/linux/stddef.h:4, from include/uapi/linux/posix_types.h:4, from include/uapi/linux/types.h:13, from include/linux/types.h:5, from include/linux/syscalls.h:71, from fs/dcache.c:17: fs/dcache.c: In function 'release_dentry_name_snapshot': include/linux/compiler.h:542:38: error: call to '__compiletime_assert_305' declared with attribute error: pointer type mismatch in container_of() _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ^ include/linux/compiler.h:525:4: note: in definition of macro '__compiletime_assert' prefix ## suffix(); \ ^ include/linux/compiler.h:542:2: note: in expansion of macro '_compiletime_assert' _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ^ include/linux/build_bug.h:46:37: note: in expansion of macro 'compiletime_assert' #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) ^ include/linux/kernel.h:860:2: note: in expansion of macro 'BUILD_BUG_ON_MSG' BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ ^ fs/dcache.c:305:7: note: in expansion of macro 'container_of' p = container_of(name->name, struct external_name, name[0]); Switch name_snapshot to use unsigned chars, matching struct qstr and struct external_name. Link: http://lkml.kernel.org/r/20170710152134.0f78c1e6@canb.auug.org.au Signed-off-by: Stephen Rothwell Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/dcache.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 025727bf6797..c706eaac692e 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -592,8 +592,8 @@ static inline struct inode *d_real_inode(const struct dentry *dentry) } struct name_snapshot { - const char *name; - char inline_name[DNAME_INLINE_LEN]; + const unsigned char *name; + unsigned char inline_name[DNAME_INLINE_LEN]; }; void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); void release_dentry_name_snapshot(struct name_snapshot *); -- cgit v1.2.3-59-g8ed1b From c7acec713d14c6ce8a20154f9dfda258d6bcad3b Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Wed, 12 Jul 2017 14:33:04 -0700 Subject: kernel.h: handle pointers to arrays better in container_of() If the first parameter of container_of() is a pointer to a non-const-qualified array type (and the third parameter names a non-const-qualified array member), the local variable __mptr will be defined with a const-qualified array type. In ISO C, these types are incompatible. They work as expected in GNU C, but some versions will issue warnings. For example, GCC 4.9 produces the warning "initialization from incompatible pointer type". 
Here is an example of where the problem occurs: ------------------------------------------------------- #include <linux/kernel.h> #include <linux/module.h> MODULE_LICENSE("GPL"); struct st { int a; char b[16]; }; static int __init example_init(void) { struct st t = { .a = 101, .b = "hello" }; char (*p)[16] = &t.b; struct st *x = container_of(p, struct st, b); printk(KERN_DEBUG "%p %p\n", (void *)&t, (void *)x); return 0; } static void __exit example_exit(void) { } module_init(example_init); module_exit(example_exit); ------------------------------------------------------- Building the module with gcc-4.9 results in these warnings (where '{m}' is the module source and '{k}' is the kernel source): ------------------------------------------------------- In file included from {m}/example.c:1:0: {m}/example.c: In function `example_init': {k}/include/linux/kernel.h:854:48: warning: initialization from incompatible pointer type const typeof( ((type *)0)->member ) *__mptr = (ptr); \ ^ {m}/example.c:14:17: note: in expansion of macro `container_of' struct st *x = container_of(p, struct st, b); ^ {k}/include/linux/kernel.h:854:48: warning: (near initialization for `x') const typeof( ((type *)0)->member ) *__mptr = (ptr); \ ^ {m}/example.c:14:17: note: in expansion of macro `container_of' struct st *x = container_of(p, struct st, b); ^ ------------------------------------------------------- Replace the type checking performed by the macro to avoid these warnings. Make sure `*(ptr)` either has type compatible with the member, or has type compatible with `void`, ignoring qualifiers. Raise compiler errors if this is not true. This is stronger than the previous behaviour, which only resulted in compiler warnings for a type mismatch. [arnd@arndb.de: fix new warnings for container_of()] Link: http://lkml.kernel.org/r/20170620200940.90557-1-arnd@arndb.de Link: http://lkml.kernel.org/r/20170525120316.24473-7-abbotti@mev.co.uk Signed-off-by: Ian Abbott Signed-off-by: Arnd Bergmann Acked-by: Michal Nazarewicz Acked-by: Kees Cook Cc: Hidehiro Kawai Cc: Borislav Petkov Cc: Rasmus Villemoes Cc: Johannes Berg Cc: Peter Zijlstra Cc: Alexander Potapenko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 1c91f26e2996..bd6d96cf80b1 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -11,6 +11,7 @@ #include <linux/log2.h> #include <linux/typecheck.h> #include <linux/printk.h> +#include <linux/build_bug.h> #include <asm/byteorder.h> #include <uapi/linux/kernel.h> @@ -854,9 +855,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * @member: the name of the member within the struct. 
* */ -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type,member) );}) +#define container_of(ptr, type, member) ({ \ + void *__mptr = (void *)(ptr); \ + BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ + !__same_type(*(ptr), void), \ + "pointer type mismatch in container_of()"); \ + ((type *)(__mptr - offsetof(type, member))); }) /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ #ifdef CONFIG_FTRACE_MCOUNT_RECORD -- cgit v1.2.3-59-g8ed1b From 91a90140f9987101d730b7dad8c6406321285da8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 12 Jul 2017 14:33:08 -0700 Subject: mm/memory.c: mark create_huge_pmd() inline to prevent build failure With gcc 4.1.2: mm/memory.o: In function `create_huge_pmd': memory.c:(.text+0x93e): undefined reference to `do_huge_pmd_anonymous_page' Interestingly, create_huge_pmd() is emitted in the assembler output, but never called. Converting transparent_hugepage_enabled() from a macro to a static inline function reduced the ability of the compiler to remove unused code. Fix this by marking create_huge_pmd() inline. Fixes: 16981d763501c0e0 ("mm: improve readability of transparent_hugepage_enabled()") Link: http://lkml.kernel.org/r/1499842660-10665-1-git-send-email-geert@linux-m68k.org Signed-off-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index cbb57194687e..0e517be91a89 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3591,7 +3591,7 @@ out: return 0; } -static int create_huge_pmd(struct vm_fault *vmf) +static inline int create_huge_pmd(struct vm_fault *vmf) { if (vma_is_anonymous(vmf->vma)) return do_huge_pmd_anonymous_page(vmf); -- cgit v1.2.3-59-g8ed1b From 112166f88cf83dd11486cf1818672d42b540865b Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 12 Jul 2017 14:33:11 -0700 Subject: kernel/fork.c: virtually mapped stacks: do not disable interrupts The reason to disable interrupts seems to be to avoid switching to a different processor while handling per cpu data using individual loads and stores. If we use per cpu RMW (read-modify-write) primitives we will not have to disable interrupts. 
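To see what this buys us, here is the stack-cache claim operation from the kernel/fork.c hunk below, condensed into a before/after sketch (not a verbatim quote of the diff):

    /* before: interrupts off so the read and the clearing write
     * cannot be split by a migration to another CPU */
    local_irq_disable();
    s = this_cpu_read(cached_stacks[i]);
    if (s)
        this_cpu_write(cached_stacks[i], NULL);
    local_irq_enable();

    /* after: a single per-cpu read-modify-write claims the slot */
    s = this_cpu_xchg(cached_stacks[i], NULL);

The free path gets the same treatment: this_cpu_cmpxchg(cached_stacks[i], NULL, tsk->stack_vm_area) only installs the stack into a cache slot that is still empty, again without touching the interrupt state.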
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1705171055130.5898@east.gentwo.org Signed-off-by: Christoph Lameter Cc: Andy Lutomirski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index 0f69a3e5281e..d2b9d7c31eaf 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -205,19 +205,17 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) void *stack; int i; - local_irq_disable(); for (i = 0; i < NR_CACHED_STACKS; i++) { - struct vm_struct *s = this_cpu_read(cached_stacks[i]); + struct vm_struct *s; + + s = this_cpu_xchg(cached_stacks[i], NULL); if (!s) continue; - this_cpu_write(cached_stacks[i], NULL); tsk->stack_vm_area = s; - local_irq_enable(); return s->addr; } - local_irq_enable(); stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE, VMALLOC_START, VMALLOC_END, @@ -245,19 +243,15 @@ static inline void free_thread_stack(struct task_struct *tsk) { #ifdef CONFIG_VMAP_STACK if (task_stack_vm_area(tsk)) { - unsigned long flags; int i; - local_irq_save(flags); for (i = 0; i < NR_CACHED_STACKS; i++) { - if (this_cpu_read(cached_stacks[i])) + if (this_cpu_cmpxchg(cached_stacks[i], + NULL, tsk->stack_vm_area) != NULL) continue; - this_cpu_write(cached_stacks[i], tsk->stack_vm_area); - local_irq_restore(flags); return; } - local_irq_restore(flags); vfree_atomic(tsk->stack); return; -- cgit v1.2.3-59-g8ed1b From 203e9e41219b4e7357104e525e91ac609fba2c6c Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 12 Jul 2017 14:33:14 -0700 Subject: kexec: move vmcoreinfo out of the kernel's .bss section As Eric said, "what we need to do is move the variable vmcoreinfo_note out of the kernel's .bss section. And modify the code to regenerate and keep this information in something like the control page. Definitely something like this needs a page all to itself, and ideally far away from any other kernel data structures. I clearly was not watching closely the data someone decided to keep this silly thing in the kernel's .bss section." This patch allocates extra pages for these vmcoreinfo_XXX variables, one advantage is that it enhances some safety of vmcoreinfo, because vmcoreinfo now is kept far away from other kernel data structures. 
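The heart of the change is in the kernel/crash_core.c hunk below; condensed, the static .bss objects become pointers that are filled in from the page allocator at initcall time:

    /* before: both buffers lived in .bss, adjacent to other kernel data */
    static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
    u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];

    /* after: dedicated zeroed pages of their own */
    vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL);
    vmcoreinfo_note = alloc_pages_exact(VMCOREINFO_NOTE_SIZE,
                                        GFP_KERNEL | __GFP_ZERO);

Users that previously took the address of the arrays (x86 crash, Xen, ia64) now use the pointers directly, and crash_save_vmcoreinfo() bails out early if the allocation never happened.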
Link: http://lkml.kernel.org/r/1493281021-20737-1-git-send-email-xlpang@redhat.com Signed-off-by: Xunlei Pang Tested-by: Michael Holzheu Reviewed-by: Juergen Gross Suggested-by: Eric Biederman Cc: Benjamin Herrenschmidt Cc: Dave Young Cc: Hari Bathini Cc: Mahesh Salgaonkar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/machine_kexec.c | 5 ----- arch/s390/kernel/machine_kexec.c | 1 + arch/s390/kernel/setup.c | 6 ------ arch/x86/kernel/crash.c | 2 +- arch/x86/xen/mmu_pv.c | 4 ++-- include/linux/crash_core.h | 4 ++-- kernel/crash_core.c | 26 ++++++++++++++++++++++---- kernel/ksysfs.c | 2 +- 8 files changed, 29 insertions(+), 21 deletions(-) diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c index 599507bcec91..c14815dca747 100644 --- a/arch/ia64/kernel/machine_kexec.c +++ b/arch/ia64/kernel/machine_kexec.c @@ -163,8 +163,3 @@ void arch_crash_save_vmcoreinfo(void) #endif } -phys_addr_t paddr_vmcoreinfo_note(void) -{ - return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note); -} - diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 49a6bd45957b..3d0b14afa232 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -246,6 +246,7 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_SYMBOL(lowcore_ptr); VMCOREINFO_SYMBOL(high_memory); VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); + mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); } void machine_shutdown(void) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 3ae756c0db3d..3d1d808ea8a9 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -496,11 +496,6 @@ static void __init setup_memory_end(void) pr_notice("The maximum memory size is %luMB\n", memory_end >> 20); } -static void __init setup_vmcoreinfo(void) -{ - mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); -} - #ifdef CONFIG_CRASH_DUMP /* @@ -939,7 +934,6 @@ void __init setup_arch(char **cmdline_p) #endif setup_resources(); - setup_vmcoreinfo(); setup_lowcore(); smp_fill_possible_mask(); cpu_detect_mhz_feature(); diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 22217ece26c8..44404e2307bb 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -457,7 +457,7 @@ static int prepare_elf64_headers(struct crash_elf_data *ced, bufp += sizeof(Elf64_Phdr); phdr->p_type = PT_NOTE; phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note(); - phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note); + phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE; (ehdr->e_phnum)++; #ifdef CONFIG_X86_64 diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 1d7a7213a310..cab28cf2cffb 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -2693,8 +2693,8 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); phys_addr_t paddr_vmcoreinfo_note(void) { if (xen_pv_domain()) - return virt_to_machine(&vmcoreinfo_note).maddr; + return virt_to_machine(vmcoreinfo_note).maddr; else - return __pa_symbol(&vmcoreinfo_note); + return __pa(vmcoreinfo_note); } #endif /* CONFIG_KEXEC_CORE */ diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 4090a42578a8..87506a02e914 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -19,7 +19,7 @@ CRASH_CORE_NOTE_NAME_BYTES + \ CRASH_CORE_NOTE_DESC_BYTES) -#define VMCOREINFO_BYTES (4096) +#define VMCOREINFO_BYTES PAGE_SIZE #define VMCOREINFO_NOTE_NAME "VMCOREINFO" #define 
VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4) #define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \ @@ -56,7 +56,7 @@ phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_CONFIG(name) \ vmcoreinfo_append_str("CONFIG_%s=y\n", #name) -extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; +extern u32 *vmcoreinfo_note; extern size_t vmcoreinfo_size; extern size_t vmcoreinfo_max_size; diff --git a/kernel/crash_core.c b/kernel/crash_core.c index fcbd568f1e95..2837d6164db8 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -14,10 +14,10 @@ #include /* vmcoreinfo stuff */ -static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; -u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; +static unsigned char *vmcoreinfo_data; size_t vmcoreinfo_size; -size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); +size_t vmcoreinfo_max_size = VMCOREINFO_BYTES; +u32 *vmcoreinfo_note; /* * parsing the "crashkernel" commandline @@ -326,6 +326,9 @@ static void update_vmcoreinfo_note(void) void crash_save_vmcoreinfo(void) { + if (!vmcoreinfo_note) + return; + vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); update_vmcoreinfo_note(); } @@ -356,11 +359,26 @@ void __weak arch_crash_save_vmcoreinfo(void) phys_addr_t __weak paddr_vmcoreinfo_note(void) { - return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note); + return __pa(vmcoreinfo_note); } static int __init crash_save_vmcoreinfo_init(void) { + vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL); + if (!vmcoreinfo_data) { + pr_warn("Memory allocation for vmcoreinfo_data failed\n"); + return -ENOMEM; + } + + vmcoreinfo_note = alloc_pages_exact(VMCOREINFO_NOTE_SIZE, + GFP_KERNEL | __GFP_ZERO); + if (!vmcoreinfo_note) { + free_page((unsigned long)vmcoreinfo_data); + vmcoreinfo_data = NULL; + pr_warn("Memory allocation for vmcoreinfo_note failed\n"); + return -ENOMEM; + } + VMCOREINFO_OSRELEASE(init_uts_ns.name.release); VMCOREINFO_PAGESIZE(PAGE_SIZE); diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index df1a9aa602a0..46ba853656f6 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -134,7 +134,7 @@ static ssize_t vmcoreinfo_show(struct kobject *kobj, { phys_addr_t vmcore_base = paddr_vmcoreinfo_note(); return sprintf(buf, "%pa %x\n", &vmcore_base, - (unsigned int)sizeof(vmcoreinfo_note)); + (unsigned int)VMCOREINFO_NOTE_SIZE); } KERNEL_ATTR_RO(vmcoreinfo); -- cgit v1.2.3-59-g8ed1b From 5203f4995d9a87952a83c2ce7866adbbe8f97bb5 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 12 Jul 2017 14:33:17 -0700 Subject: powerpc/fadump: use the correct VMCOREINFO_NOTE_SIZE for phdr vmcoreinfo_max_size stands for the vmcoreinfo_data, the correct one we should use is vmcoreinfo_note whose total size is VMCOREINFO_NOTE_SIZE. Like explained in commit 77019967f06b ("kdump: fix exported size of vmcoreinfo note"), it should not affect the actual function, but we better fix it, also this change should be safe and backward compatible. After this, we can get rid of variable vmcoreinfo_max_size, let's use the corresponding macros directly, fewer variables means more safety for vmcoreinfo operation. 
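The fix itself is the two-line change in fadump_create_elfcore_headers() shown in the diff below: the PT_NOTE program header has to describe the whole ELF note (note header plus name plus payload), not just the payload bytes that vmcoreinfo_max_size tracked:

    phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
    phdr->p_offset = phdr->p_paddr;
    /* the whole note, not just the vmcoreinfo data it carries */
    phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE;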
[xlpang@redhat.com: fix build warning] Link: http://lkml.kernel.org/r/1494830606-27736-1-git-send-email-xlpang@redhat.com Link: http://lkml.kernel.org/r/1493281021-20737-2-git-send-email-xlpang@redhat.com Signed-off-by: Xunlei Pang Reviewed-by: Mahesh Salgaonkar Reviewed-by: Dave Young Cc: Hari Bathini Cc: Benjamin Herrenschmidt Cc: Eric Biederman Cc: Juergen Gross Cc: Michael Holzheu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/fadump.c | 3 +-- include/linux/crash_core.h | 1 - kernel/crash_core.c | 3 +-- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 3079518f2245..dc0c49cfd90a 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -999,8 +999,7 @@ static int fadump_create_elfcore_headers(char *bufp) phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note()); phdr->p_offset = phdr->p_paddr; - phdr->p_memsz = vmcoreinfo_max_size; - phdr->p_filesz = vmcoreinfo_max_size; + phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE; /* Increment number of program headers. */ (elf->e_phnum)++; diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 87506a02e914..e5df1b3cf072 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -58,7 +58,6 @@ phys_addr_t paddr_vmcoreinfo_note(void); extern u32 *vmcoreinfo_note; extern size_t vmcoreinfo_size; -extern size_t vmcoreinfo_max_size; Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 2837d6164db8..315adbf9cb68 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -16,7 +16,6 @@ /* vmcoreinfo stuff */ static unsigned char *vmcoreinfo_data; size_t vmcoreinfo_size; -size_t vmcoreinfo_max_size = VMCOREINFO_BYTES; u32 *vmcoreinfo_note; /* @@ -343,7 +342,7 @@ void vmcoreinfo_append_str(const char *fmt, ...) r = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); - r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); + r = min(r, (size_t)VMCOREINFO_BYTES - vmcoreinfo_size); memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); -- cgit v1.2.3-59-g8ed1b From 1229384f5b856d83698c38f9dedfd836e26711cb Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 12 Jul 2017 14:33:21 -0700 Subject: kdump: protect vmcoreinfo data under the crash memory Currently vmcoreinfo data is updated at boot time subsys_initcall(), it has the risk of being modified by some wrong code during system is running. As a result, vmcore dumped may contain the wrong vmcoreinfo. Later on, when using "crash", "makedumpfile", etc utility to parse this vmcore, we probably will get "Segmentation fault" or other unexpected errors. E.g. 1) wrong code overwrites vmcoreinfo_data; 2) further crashes the system; 3) trigger kdump, then we obviously will fail to recognize the crash context correctly due to the corrupted vmcoreinfo. Now except for vmcoreinfo, all the crash data is well protected(including the cpu note which is fully updated in the crash path, thus its correctness is guaranteed). Given that vmcoreinfo data is a large chunk prepared for kdump, we better protect it as well. To solve this, we relocate and copy vmcoreinfo_data to the crash memory when kdump is loading via kexec syscalls. Because the whole crash memory will be protected by existing arch_kexec_protect_crashkres() mechanism, we naturally protect vmcoreinfo_data from write(even read) access under kernel direct mapping after kdump is loaded. 
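Condensed from the kernel/kexec_core.c hunk below, the new kimage_crash_copy_vmcoreinfo() helper amounts to:

    /* one control page taken from the protected crash memory ... */
    vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
    /* ... kept reachable through a vmap() alias, because the direct
     * mapping becomes inaccessible once the crash kernel is loaded */
    safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
    image->vmcoreinfo_data_copy = safecopy;
    crash_update_vmcoreinfo_safecopy(safecopy);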
Since kdump is usually loaded at the very early stage after boot, we can trust the correctness of the vmcoreinfo data copied. On the other hand, we still need to operate the vmcoreinfo safe copy when crash happens to generate vmcoreinfo_note again, we rely on vmap() to map out a new kernel virtual address and update to use this new one instead in the following crash_save_vmcoreinfo(). BTW, we do not touch vmcoreinfo_note, because it will be fully updated using the protected vmcoreinfo_data after crash which is surely correct just like the cpu crash note. Link: http://lkml.kernel.org/r/1493281021-20737-3-git-send-email-xlpang@redhat.com Signed-off-by: Xunlei Pang Tested-by: Michael Holzheu Cc: Benjamin Herrenschmidt Cc: Dave Young Cc: Eric Biederman Cc: Hari Bathini Cc: Juergen Gross Cc: Mahesh Salgaonkar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/crash_core.h | 2 +- include/linux/kexec.h | 2 ++ kernel/crash_core.c | 17 ++++++++++++++++- kernel/kexec.c | 8 ++++++++ kernel/kexec_core.c | 39 +++++++++++++++++++++++++++++++++++++++ kernel/kexec_file.c | 8 ++++++++ 6 files changed, 74 insertions(+), 2 deletions(-) diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index e5df1b3cf072..2df2118fbe13 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -28,6 +28,7 @@ typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4]; +void crash_update_vmcoreinfo_safecopy(void *ptr); void crash_save_vmcoreinfo(void); void arch_crash_save_vmcoreinfo(void); __printf(1, 2) @@ -57,7 +58,6 @@ phys_addr_t paddr_vmcoreinfo_note(void); vmcoreinfo_append_str("CONFIG_%s=y\n", #name) extern u32 *vmcoreinfo_note; -extern size_t vmcoreinfo_size; Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 65888418fb69..dd056fab9e35 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -172,6 +172,7 @@ struct kimage { unsigned long start; struct page *control_code_page; struct page *swap_page; + void *vmcoreinfo_data_copy; /* locates in the crash memory */ unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; @@ -241,6 +242,7 @@ extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); int kexec_crash_loaded(void); void crash_save_cpu(struct pt_regs *regs, int cpu); +extern int kimage_crash_copy_vmcoreinfo(struct kimage *image); extern struct kimage *kexec_image; extern struct kimage *kexec_crash_image; diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 315adbf9cb68..6db80fc0810b 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -15,9 +15,12 @@ /* vmcoreinfo stuff */ static unsigned char *vmcoreinfo_data; -size_t vmcoreinfo_size; +static size_t vmcoreinfo_size; u32 *vmcoreinfo_note; +/* trusted vmcoreinfo, e.g. 
we can make a copy in the crash memory */ +static unsigned char *vmcoreinfo_data_safecopy; + /* * parsing the "crashkernel" commandline * @@ -323,11 +326,23 @@ static void update_vmcoreinfo_note(void) final_note(buf); } +void crash_update_vmcoreinfo_safecopy(void *ptr) +{ + if (ptr) + memcpy(ptr, vmcoreinfo_data, vmcoreinfo_size); + + vmcoreinfo_data_safecopy = ptr; +} + void crash_save_vmcoreinfo(void) { if (!vmcoreinfo_note) return; + /* Use the safe copy to generate vmcoreinfo note if have */ + if (vmcoreinfo_data_safecopy) + vmcoreinfo_data = vmcoreinfo_data_safecopy; + vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); update_vmcoreinfo_note(); } diff --git a/kernel/kexec.c b/kernel/kexec.c index 980936a90ee6..e62ec4dc6620 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -144,6 +144,14 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments, if (ret) goto out; + /* + * Some architecture(like S390) may touch the crash memory before + * machine_kexec_prepare(), we must copy vmcoreinfo data after it. + */ + ret = kimage_crash_copy_vmcoreinfo(image); + if (ret) + goto out; + for (i = 0; i < nr_segments; i++) { ret = kimage_load_segment(image, &image->segment[i]); if (ret) diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 154ffb489b93..1ae7c41c33c1 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -482,6 +482,40 @@ struct page *kimage_alloc_control_pages(struct kimage *image, return pages; } +int kimage_crash_copy_vmcoreinfo(struct kimage *image) +{ + struct page *vmcoreinfo_page; + void *safecopy; + + if (image->type != KEXEC_TYPE_CRASH) + return 0; + + /* + * For kdump, allocate one vmcoreinfo safe copy from the + * crash memory. as we have arch_kexec_protect_crashkres() + * after kexec syscall, we naturally protect it from write + * (even read) access under kernel direct mapping. But on + * the other hand, we still need to operate it when crash + * happens to generate vmcoreinfo note, hereby we rely on + * vmap for this purpose. + */ + vmcoreinfo_page = kimage_alloc_control_pages(image, 0); + if (!vmcoreinfo_page) { + pr_warn("Could not allocate vmcoreinfo buffer\n"); + return -ENOMEM; + } + safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL); + if (!safecopy) { + pr_warn("Could not vmap vmcoreinfo buffer\n"); + return -ENOMEM; + } + + image->vmcoreinfo_data_copy = safecopy; + crash_update_vmcoreinfo_safecopy(safecopy); + + return 0; +} + static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) { if (*image->entry != 0) @@ -569,6 +603,11 @@ void kimage_free(struct kimage *image) if (!image) return; + if (image->vmcoreinfo_data_copy) { + crash_update_vmcoreinfo_safecopy(NULL); + vunmap(image->vmcoreinfo_data_copy); + } + kimage_free_extra_pages(image); for_each_kimage_entry(image, ptr, entry) { if (entry & IND_INDIRECTION) { diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 766e7e4d3ad9..c8f7f77e9fa9 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -298,6 +298,14 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, if (ret) goto out; + /* + * Some architecture(like S390) may touch the crash memory before + * machine_kexec_prepare(), we must copy vmcoreinfo data after it. 
+ */ + ret = kimage_crash_copy_vmcoreinfo(image); + if (ret) + goto out; + ret = kexec_calculate_store_digests(image); if (ret) goto out; -- cgit v1.2.3-59-g8ed1b From a711bdc095d2c9b6ad15e737d1cdc46409b09538 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Wed, 12 Jul 2017 14:33:24 -0700 Subject: kexec/kdump: minor Documentation updates for arm64 and Image Minor updates in Documentation for arm64 as relocatable kernel. Also this patch updates documentation for using uncompressed image "Image" which is used for ARM64. Link: http://lkml.kernel.org/r/1495104793-6563-1-git-send-email-Bharat.Bhushan@nxp.com Signed-off-by: Bharat Bhushan Cc: Dave Young Cc: Baoquan He Cc: Vivek Goyal Cc: Jonathan Corbet Cc: AKASHI Takahiro Cc: Pratyush Anand Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kdump/kdump.txt | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt index 615434d81108..51814450a7f8 100644 --- a/Documentation/kdump/kdump.txt +++ b/Documentation/kdump/kdump.txt @@ -112,8 +112,8 @@ There are two possible methods of using Kdump. 2) Or use the system kernel binary itself as dump-capture kernel and there is no need to build a separate dump-capture kernel. This is possible only with the architectures which support a relocatable kernel. As - of today, i386, x86_64, ppc64, ia64 and arm architectures support relocatable - kernel. + of today, i386, x86_64, ppc64, ia64, arm and arm64 architectures support + relocatable kernel. Building a relocatable kernel is advantageous from the point of view that one does not have to build a second kernel for capturing the dump. But @@ -339,7 +339,7 @@ For arm: For arm64: - Use vmlinux or Image -If you are using a uncompressed vmlinux image then use following command +If you are using an uncompressed vmlinux image then use following command to load dump-capture kernel. kexec -p \ @@ -361,6 +361,12 @@ to load dump-capture kernel. --dtb= \ --append="root= " +If you are using an uncompressed Image, then use following command +to load dump-capture kernel. + + kexec -p \ + --initrd= \ + --append="root= " Please note, that --args-linux does not need to be specified for ia64. It is planned to make this a no-op on that architecture, but for now -- cgit v1.2.3-59-g8ed1b From 89c5b53b16bf577079d4f0311406dbea3c71202c Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:27 -0700 Subject: sysctl: fix lax sysctl_check_table() sanity check Patch series "sysctl: few fixes", v5. I've been working on making kmod more deterministic, and as I did that I couldn't help but notice a few issues with sysctl. My end goal was just to fix unsigned int support, which back then was completely broken. Liping Zhang has sent up small atomic fixes, however it still missed yet one more fix and Alexey Dobriyan had also suggested to just drop array support given its complexity. I have inspected array support using Coccinelle and indeed its not that popular, so if in fact we can avoid it for new interfaces, I agree its best. I did develop a sysctl stress driver but will hold that off for another series. This patch (of 5): Commit 7c60c48f58a7 ("sysctl: Improve the sysctl sanity checks") improved sanity checks considerbly, however the enhancements on sysctl_check_table() meant adding a functional change so that only the last table entry's sanity error is propagated. 
It also changed the way errors were propagated so that each new check reset the err value, this means only last sanity check computed is used for an error. This has been in the kernel since v3.4 days. Fix this by carrying on errors from previous checks and iterations as we traverse the table and ensuring we keep any error from previous checks. We keep iterating on the table even if an error is found so we can complain for all errors found in one shot. This works as -EINVAL is always returned on error anyway, and the check for error is any non-zero value. Fixes: 7c60c48f58a7 ("sysctl: Improve the sysctl sanity checks") Link: http://lkml.kernel.org/r/20170519033554.18592-2-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Al Viro Cc: "Eric W. Biederman" Cc: Alexey Dobriyan Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/proc_sysctl.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 67985a7233c2..32c9c5630507 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1066,7 +1066,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) int err = 0; for (; table->procname; table++) { if (table->child) - err = sysctl_err(path, table, "Not a file"); + err |= sysctl_err(path, table, "Not a file"); if ((table->proc_handler == proc_dostring) || (table->proc_handler == proc_dointvec) || @@ -1078,15 +1078,15 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) (table->proc_handler == proc_doulongvec_minmax) || (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { if (!table->data) - err = sysctl_err(path, table, "No data"); + err |= sysctl_err(path, table, "No data"); if (!table->maxlen) - err = sysctl_err(path, table, "No maxlen"); + err |= sysctl_err(path, table, "No maxlen"); } if (!table->proc_handler) - err = sysctl_err(path, table, "No proc_handler"); + err |= sysctl_err(path, table, "No proc_handler"); if ((table->mode & (S_IRUGO|S_IWUGO)) != table->mode) - err = sysctl_err(path, table, "bogus .mode 0%o", + err |= sysctl_err(path, table, "bogus .mode 0%o", table->mode); } return err; -- cgit v1.2.3-59-g8ed1b From a19ac3374995382a994653ff372b98ea7cbad548 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:30 -0700 Subject: sysctl: kdoc'ify sysctl_writes_strict Document the different sysctl_writes_strict modes in code. Link: http://lkml.kernel.org/r/20170519033554.18592-3-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Al Viro Cc: "Eric W. Biederman" Cc: Alexey Dobriyan Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4dfba1a76cc3..02725178694a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -174,11 +174,32 @@ extern int no_unaligned_warning; #ifdef CONFIG_PROC_SYSCTL -#define SYSCTL_WRITES_LEGACY -1 -#define SYSCTL_WRITES_WARN 0 -#define SYSCTL_WRITES_STRICT 1 +/** + * enum sysctl_writes_mode - supported sysctl write modes + * + * @SYSCTL_WRITES_LEGACY: each write syscall must fully contain the sysctl value + * to be written, and multiple writes on the same sysctl file descriptor + * will rewrite the sysctl value, regardless of file position. No warning + * is issued when the initial position is not 0. + * @SYSCTL_WRITES_WARN: same as above but warn when the initial file position is + * not 0. 
+ * @SYSCTL_WRITES_STRICT: writes to numeric sysctl entries must always be at + * file position 0 and the value must be fully contained in the buffer + * sent to the write syscall. If dealing with strings respect the file + * position, but restrict this to the max length of the buffer, anything + * past the max length will be ignored. Multiple writes will append + * to the buffer. + * + * These write modes control how current file position affects the behavior of + * updating sysctl values through the proc interface on each write. + */ +enum sysctl_writes_mode { + SYSCTL_WRITES_LEGACY = -1, + SYSCTL_WRITES_WARN = 0, + SYSCTL_WRITES_STRICT = 1, +}; -static int sysctl_writes_strict = SYSCTL_WRITES_STRICT; +static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT; static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -- cgit v1.2.3-59-g8ed1b From d383d48470819e86fe30eb72f0e9494e1ee0e2af Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:33 -0700 Subject: sysctl: fold sysctl_writes_strict checks into helper The mode sysctl_writes_strict positional checks keep being copied and pasted as we add new proc handlers. Just add a helper to avoid code duplication. Link: http://lkml.kernel.org/r/20170519033554.18592-4-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Suggested-by: Kees Cook Cc: Al Viro Cc: "Eric W. Biederman" Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 56 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 24 deletions(-) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 02725178694a..6f3bb1f099fa 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1970,6 +1970,32 @@ static void warn_sysctl_write(struct ctl_table *table) current->comm, table->procname); } +/** + * proc_first_pos_non_zero_ignore - check if first position is allowed + * @ppos: file position + * @table: the sysctl table + * + * Returns true if the first position is non-zero and the sysctl_writes_strict + * mode indicates this is not allowed for numeric input types. String proc + * handlers can ignore the return value. 
+ */ +static bool proc_first_pos_non_zero_ignore(loff_t *ppos, + struct ctl_table *table) +{ + if (!*ppos) + return false; + + switch (sysctl_writes_strict) { + case SYSCTL_WRITES_STRICT: + return true; + case SYSCTL_WRITES_WARN: + warn_sysctl_write(table); + return false; + default: + return false; + } +} + /** * proc_dostring - read a string sysctl * @table: the sysctl table @@ -1990,8 +2016,8 @@ static void warn_sysctl_write(struct ctl_table *table) int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - if (write && *ppos && sysctl_writes_strict == SYSCTL_WRITES_WARN) - warn_sysctl_write(table); + if (write) + proc_first_pos_non_zero_ignore(ppos, table); return _proc_do_string((char *)(table->data), table->maxlen, write, (char __user *)buffer, lenp, ppos); @@ -2193,17 +2219,8 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, conv = do_proc_dointvec_conv; if (write) { - if (*ppos) { - switch (sysctl_writes_strict) { - case SYSCTL_WRITES_STRICT: - goto out; - case SYSCTL_WRITES_WARN: - warn_sysctl_write(table); - break; - default: - break; - } - } + if (proc_first_pos_non_zero_ignore(ppos, table)) + goto out; if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; @@ -2468,17 +2485,8 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int left = *lenp; if (write) { - if (*ppos) { - switch (sysctl_writes_strict) { - case SYSCTL_WRITES_STRICT: - goto out; - case SYSCTL_WRITES_WARN: - warn_sysctl_write(table); - break; - default: - break; - } - } + if (proc_first_pos_non_zero_ignore(ppos, table)) + goto out; if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; -- cgit v1.2.3-59-g8ed1b From 4f2fec00afa60aa8e5d1b7f2a8e0526900f55623 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:36 -0700 Subject: sysctl: simplify unsigned int support Commit e7d316a02f68 ("sysctl: handle error writing UINT_MAX to u32 fields") added proc_douintvec() to start help adding support for unsigned int, this however was only half the work needed. Two fixes have come in since then for the following issues: o Printing the values shows a negative value, this happens since do_proc_dointvec() and this uses proc_put_long() This was fixed by commit 5380e5644afbba9 ("sysctl: don't print negative flag for proc_douintvec"). o We can easily wrap around the int values: UINT_MAX is 4294967295, if we echo in 4294967295 + 1 we end up with 0, using 4294967295 + 2 we end up with 1. o We echo negative values in and they are accepted This was fixed by commit 425fffd886ba ("sysctl: report EINVAL if value is larger than UINT_MAX for proc_douintvec"). It still also failed to be added to sysctl_check_table()... instead of adding it with the current implementation just provide a proper and simplified unsigned int support without any array unsigned int support with no negative support at all. Historically sysctl proc helpers have supported arrays, due to the complexity this adds though we've taken a step back to evaluate array users to determine if its worth upkeeping for unsigned int. An evaluation using Coccinelle has been done to perform a grammatical search to ask ourselves: o How many sysctl proc_dointvec() (int) users exist which likely should be moved over to proc_douintvec() (unsigned int) ? Answer: about 8 - Of these how many are array users ? Answer: Probably only 1 o How many sysctl array users exist ? Answer: about 12 This last question gives us an idea just how popular arrays: they are not. 
Array support should probably just be kept for strings. The identified uint ports are: drivers/infiniband/core/ucma.c - max_backlog drivers/infiniband/core/iwcm.c - default_backlog net/core/sysctl_net_core.c - rps_sock_flow_sysctl() net/netfilter/nf_conntrack_timestamp.c - nf_conntrack_timestamp -- bool net/netfilter/nf_conntrack_acct.c nf_conntrack_acct -- bool net/netfilter/nf_conntrack_ecache.c - nf_conntrack_events -- bool net/netfilter/nf_conntrack_helper.c - nf_conntrack_helper -- bool net/phonet/sysctl.c proc_local_port_range() The only possible array users is proc_local_port_range() but it does not seem worth it to add array support just for this given the range support works just as well. Unsigned int support should be desirable more for when you *need* more than INT_MAX or using int min/max support then does not suffice for your ranges. If you forget and by mistake happen to register an unsigned int proc entry with an array, the driver will fail and you will get something as follows: sysctl table check failed: debug/test_sysctl//uint_0002 array now allowed CPU: 2 PID: 1342 Comm: modprobe Tainted: G W E Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Call Trace: dump_stack+0x63/0x81 __register_sysctl_table+0x350/0x650 ? kmem_cache_alloc_trace+0x107/0x240 __register_sysctl_paths+0x1b3/0x1e0 ? 0xffffffffc005f000 register_sysctl_table+0x1f/0x30 test_sysctl_init+0x10/0x1000 [test_sysctl] do_one_initcall+0x52/0x1a0 ? kmem_cache_alloc_trace+0x107/0x240 do_init_module+0x5f/0x200 load_module+0x1867/0x1bd0 ? __symbol_put+0x60/0x60 SYSC_finit_module+0xdf/0x110 SyS_finit_module+0xe/0x10 entry_SYSCALL_64_fastpath+0x1e/0xad RIP: 0033:0x7f042b22d119 Fixes: e7d316a02f68 ("sysctl: handle error writing UINT_MAX to u32 fields") Link: http://lkml.kernel.org/r/20170519033554.18592-5-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Suggested-by: Alexey Dobriyan Cc: Subash Abhinov Kasiviswanathan Cc: Liping Zhang Cc: Alexey Dobriyan Cc: Heinrich Schuchardt Cc: Kees Cook Cc: "David S. Miller" Cc: Ingo Molnar Cc: Al Viro Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/proc_sysctl.c | 14 +++++ kernel/sysctl.c | 153 +++++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 160 insertions(+), 7 deletions(-) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 32c9c5630507..ee6feba8b6c0 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1061,6 +1061,18 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...) 
return -EINVAL; } +static int sysctl_check_table_array(const char *path, struct ctl_table *table) +{ + int err = 0; + + if (table->proc_handler == proc_douintvec) { + if (table->maxlen != sizeof(unsigned int)) + err |= sysctl_err(path, table, "array now allowed"); + } + + return err; +} + static int sysctl_check_table(const char *path, struct ctl_table *table) { int err = 0; @@ -1081,6 +1093,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) err |= sysctl_err(path, table, "No data"); if (!table->maxlen) err |= sysctl_err(path, table, "No maxlen"); + else + err |= sysctl_check_table_array(path, table); } if (!table->proc_handler) err |= sysctl_err(path, table, "No proc_handler"); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6f3bb1f099fa..d12078fc215f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2175,19 +2175,18 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, return 0; } -static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp, - int *valp, - int write, void *data) +static int do_proc_douintvec_conv(unsigned long *lvalp, + unsigned int *valp, + int write, void *data) { if (write) { - if (*negp) + if (*lvalp > UINT_MAX) return -EINVAL; if (*lvalp > UINT_MAX) return -EINVAL; *valp = *lvalp; } else { unsigned int val = *valp; - *negp = false; *lvalp = (unsigned long)val; } return 0; @@ -2287,6 +2286,146 @@ static int do_proc_dointvec(struct ctl_table *table, int write, buffer, lenp, ppos, conv, data); } +static int do_proc_douintvec_w(unsigned int *tbl_data, + struct ctl_table *table, + void __user *buffer, + size_t *lenp, loff_t *ppos, + int (*conv)(unsigned long *lvalp, + unsigned int *valp, + int write, void *data), + void *data) +{ + unsigned long lval; + int err = 0; + size_t left; + bool neg; + char *kbuf = NULL, *p; + + left = *lenp; + + if (proc_first_pos_non_zero_ignore(ppos, table)) + goto bail_early; + + if (left > PAGE_SIZE - 1) + left = PAGE_SIZE - 1; + + p = kbuf = memdup_user_nul(buffer, left); + if (IS_ERR(kbuf)) + return -EINVAL; + + left -= proc_skip_spaces(&p); + if (!left) { + err = -EINVAL; + goto out_free; + } + + err = proc_get_long(&p, &left, &lval, &neg, + proc_wspace_sep, + sizeof(proc_wspace_sep), NULL); + if (err || neg) { + err = -EINVAL; + goto out_free; + } + + if (conv(&lval, tbl_data, 1, data)) { + err = -EINVAL; + goto out_free; + } + + if (!err && left) + left -= proc_skip_spaces(&p); + +out_free: + kfree(kbuf); + if (err) + return -EINVAL; + + return 0; + + /* This is in keeping with old __do_proc_dointvec() */ +bail_early: + *ppos += *lenp; + return err; +} + +static int do_proc_douintvec_r(unsigned int *tbl_data, void __user *buffer, + size_t *lenp, loff_t *ppos, + int (*conv)(unsigned long *lvalp, + unsigned int *valp, + int write, void *data), + void *data) +{ + unsigned long lval; + int err = 0; + size_t left; + + left = *lenp; + + if (conv(&lval, tbl_data, 0, data)) { + err = -EINVAL; + goto out; + } + + err = proc_put_long(&buffer, &left, lval, false); + if (err || !left) + goto out; + + err = proc_put_char(&buffer, &left, '\n'); + +out: + *lenp -= left; + *ppos += *lenp; + + return err; +} + +static int __do_proc_douintvec(void *tbl_data, struct ctl_table *table, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos, + int (*conv)(unsigned long *lvalp, + unsigned int *valp, + int write, void *data), + void *data) +{ + unsigned int *i, vleft; + + if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { + *lenp = 0; + return 0; + } + + i = (unsigned int *) 
tbl_data; + vleft = table->maxlen / sizeof(*i); + + /* + * Arrays are not supported, keep this simple. *Do not* add + * support for them. + */ + if (vleft != 1) { + *lenp = 0; + return -EINVAL; + } + + if (!conv) + conv = do_proc_douintvec_conv; + + if (write) + return do_proc_douintvec_w(i, table, buffer, lenp, ppos, + conv, data); + return do_proc_douintvec_r(i, buffer, lenp, ppos, conv, data); +} + +static int do_proc_douintvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos, + int (*conv)(unsigned long *lvalp, + unsigned int *valp, + int write, void *data), + void *data) +{ + return __do_proc_douintvec(table->data, table, write, + buffer, lenp, ppos, conv, data); +} + /** * proc_dointvec - read a vector of integers * @table: the sysctl table @@ -2322,8 +2461,8 @@ int proc_dointvec(struct ctl_table *table, int write, int proc_douintvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - return do_proc_dointvec(table, write, buffer, lenp, ppos, - do_proc_douintvec_conv, NULL); + return do_proc_douintvec(table, write, buffer, lenp, ppos, + do_proc_douintvec_conv, NULL); } /* -- cgit v1.2.3-59-g8ed1b From 61d9b56a89208d8cccd0b4cfec7e6959717e16e3 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:40 -0700 Subject: sysctl: add unsigned int range support To keep parity with regular int interfaces provide the an unsigned int proc_douintvec_minmax() which allows you to specify a range of allowed valid numbers. Adding proc_douintvec_minmax_sysadmin() is easy but we can wait for an actual user for that. Link: http://lkml.kernel.org/r/20170519033554.18592-6-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Acked-by: Kees Cook Cc: Subash Abhinov Kasiviswanathan Cc: Heinrich Schuchardt Cc: Kees Cook Cc: "David S. Miller" Cc: Ingo Molnar Cc: Al Viro Cc: "Eric W. 
Biederman" Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/proc_sysctl.c | 4 ++- include/linux/sysctl.h | 3 +++ kernel/sysctl.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 1 deletion(-) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index ee6feba8b6c0..8f9d564d0969 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1065,7 +1065,8 @@ static int sysctl_check_table_array(const char *path, struct ctl_table *table) { int err = 0; - if (table->proc_handler == proc_douintvec) { + if ((table->proc_handler == proc_douintvec) || + (table->proc_handler == proc_douintvec_minmax)) { if (table->maxlen != sizeof(unsigned int)) err |= sysctl_err(path, table, "array now allowed"); } @@ -1083,6 +1084,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) if ((table->proc_handler == proc_dostring) || (table->proc_handler == proc_dointvec) || (table->proc_handler == proc_douintvec) || + (table->proc_handler == proc_douintvec_minmax) || (table->proc_handler == proc_dointvec_minmax) || (table->proc_handler == proc_dointvec_jiffies) || (table->proc_handler == proc_dointvec_userhz_jiffies) || diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 80d07816def0..225001d437ae 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -47,6 +47,9 @@ extern int proc_douintvec(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec_minmax(struct ctl_table *, int, void __user *, size_t *, loff_t *); +extern int proc_douintvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); extern int proc_dointvec_jiffies(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d12078fc215f..df9f2a367882 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2567,6 +2567,65 @@ int proc_dointvec_minmax(struct ctl_table *table, int write, do_proc_dointvec_minmax_conv, ¶m); } +struct do_proc_douintvec_minmax_conv_param { + unsigned int *min; + unsigned int *max; +}; + +static int do_proc_douintvec_minmax_conv(unsigned long *lvalp, + unsigned int *valp, + int write, void *data) +{ + struct do_proc_douintvec_minmax_conv_param *param = data; + + if (write) { + unsigned int val = *lvalp; + + if ((param->min && *param->min > val) || + (param->max && *param->max < val)) + return -ERANGE; + + if (*lvalp > UINT_MAX) + return -EINVAL; + *valp = val; + } else { + unsigned int val = *valp; + *lvalp = (unsigned long) val; + } + + return 0; +} + +/** + * proc_douintvec_minmax - read a vector of unsigned ints with min/max values + * @table: the sysctl table + * @write: %TRUE if this is a write to the sysctl file + * @buffer: the user buffer + * @lenp: the size of the user buffer + * @ppos: file position + * + * Reads/writes up to table->maxlen/sizeof(unsigned int) unsigned integer + * values from/to the user buffer, treated as an ASCII string. Negative + * strings are not allowed. + * + * This routine will ensure the values are within the range specified by + * table->extra1 (min) and table->extra2 (max). There is a final sanity + * check for UINT_MAX to avoid having to support wrap around uses from + * userspace. + * + * Returns 0 on success. 
+ */ +int proc_douintvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct do_proc_douintvec_minmax_conv_param param = { + .min = (unsigned int *) table->extra1, + .max = (unsigned int *) table->extra2, + }; + return do_proc_douintvec(table, write, buffer, lenp, ppos, + do_proc_douintvec_minmax_conv, ¶m); +} + static void validate_coredump_safety(void) { #ifdef CONFIG_COREDUMP @@ -3066,6 +3125,12 @@ int proc_dointvec_minmax(struct ctl_table *table, int write, return -ENOSYS; } +int proc_douintvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return -ENOSYS; +} + int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { @@ -3108,6 +3173,7 @@ EXPORT_SYMBOL(proc_dointvec); EXPORT_SYMBOL(proc_douintvec); EXPORT_SYMBOL(proc_dointvec_jiffies); EXPORT_SYMBOL(proc_dointvec_minmax); +EXPORT_SYMBOL_GPL(proc_douintvec_minmax); EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); EXPORT_SYMBOL(proc_dointvec_ms_jiffies); EXPORT_SYMBOL(proc_dostring); -- cgit v1.2.3-59-g8ed1b From 9308f2f9e7f055cf3934645ec622bb5259dc1c14 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:43 -0700 Subject: test_sysctl: add dedicated proc sysctl test driver The existing tools/testing/selftests/sysctl/ tests include two test cases, but these use existing production kernel sysctl interfaces. We want to expand test coverage but we can't just be looking for random safe production values to poke at, that's just insane! Instead just dedicate a test driver for debugging purposes and port the existing scripts to use it. This will make it easier for further tests to be added. Subsequent patches will extend our test coverage for sysctl. The stress test driver uses a new license (GPL on Linux, copyleft-next outside of Linux). Linus was fine with this [0] and later due to Ted's and Alans's request ironed out an "or" language clause to use [1] which is already present upstream. [0] https://lkml.kernel.org/r/CA+55aFyhxcvD+q7tp+-yrSFDKfR0mOHgyEAe=f_94aKLsOu0Og@mail.gmail.com [1] https://lkml.kernel.org/r/1495234558.7848.122.camel@linux.intel.com Link: http://lkml.kernel.org/r/20170630224431.17374-2-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Acked-by: Kees Cook Cc: "Eric W. Biederman" Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 11 +++ lib/Makefile | 1 + lib/test_sysctl.c | 113 ++++++++++++++++++++++++ tools/testing/selftests/sysctl/config | 1 + tools/testing/selftests/sysctl/run_numerictests | 4 +- tools/testing/selftests/sysctl/run_stringtests | 4 +- 6 files changed, 130 insertions(+), 4 deletions(-) create mode 100644 lib/test_sysctl.c create mode 100644 tools/testing/selftests/sysctl/config diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e20fc079bebd..f28f4252e54a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1785,6 +1785,17 @@ config TEST_FIRMWARE If unsure, say N. +config TEST_SYSCTL + tristate "sysctl test driver" + default n + depends on PROC_SYSCTL + help + This builds the "test_sysctl" module. This driver enables to test the + proc sysctl interfaces available to drivers safely without affecting + production knobs which might alter system functionality. + + If unsure, say N. 
+ config TEST_UDELAY tristate "udelay test driver" default n diff --git a/lib/Makefile b/lib/Makefile index 5a008329324e..85e91e51a9fe 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o obj-y += kstrtox.o obj-$(CONFIG_TEST_BPF) += test_bpf.o obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o +obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o obj-$(CONFIG_TEST_KASAN) += test_kasan.o obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c new file mode 100644 index 000000000000..b2163bfb6eb2 --- /dev/null +++ b/lib/test_sysctl.c @@ -0,0 +1,113 @@ +/* + * proc sysctl test driver + * + * Copyright (C) 2017 Luis R. Rodriguez + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or at your option any + * later version; or, when distributed separately from the Linux kernel or + * when incorporated into other software packages, subject to the following + * license: + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of copyleft-next (version 0.3.1 or later) as published + * at http://copyleft-next.org/. + */ + +/* + * This module provides an interface to the the proc sysctl interfaces. This + * driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the + * system unless explicitly requested by name. You can also build this driver + * into your kernel. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int i_zero; +static int i_one_hundred = 100; + +struct test_sysctl_data { + int int_0001; + char string_0001[65]; +}; + +static struct test_sysctl_data test_data = { + .int_0001 = 60, + .string_0001 = "(none)", +}; + +/* These are all under /proc/sys/debug/test_sysctl/ */ +static struct ctl_table test_table[] = { + { + .procname = "int_0001", + .data = &test_data.int_0001, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &i_zero, + .extra2 = &i_one_hundred, + }, + { + .procname = "string_0001", + .data = &test_data.string_0001, + .maxlen = sizeof(test_data.string_0001), + .mode = 0644, + .proc_handler = proc_dostring, + }, + { } +}; + +static struct ctl_table test_sysctl_table[] = { + { + .procname = "test_sysctl", + .maxlen = 0, + .mode = 0555, + .child = test_table, + }, + { } +}; + +static struct ctl_table test_sysctl_root_table[] = { + { + .procname = "debug", + .maxlen = 0, + .mode = 0555, + .child = test_sysctl_table, + }, + { } +}; + +static struct ctl_table_header *test_sysctl_header; + +static int __init test_sysctl_init(void) +{ + test_sysctl_header = register_sysctl_table(test_sysctl_root_table); + if (!test_sysctl_header) + return -ENOMEM; + return 0; +} +late_initcall(test_sysctl_init); + +static void __exit test_sysctl_exit(void) +{ + if (test_sysctl_header) + unregister_sysctl_table(test_sysctl_header); +} + +module_exit(test_sysctl_exit); + +MODULE_AUTHOR("Luis R. 
Rodriguez "); +MODULE_LICENSE("GPL"); diff --git a/tools/testing/selftests/sysctl/config b/tools/testing/selftests/sysctl/config new file mode 100644 index 000000000000..6ca14800d755 --- /dev/null +++ b/tools/testing/selftests/sysctl/config @@ -0,0 +1 @@ +CONFIG_TEST_SYSCTL=y diff --git a/tools/testing/selftests/sysctl/run_numerictests b/tools/testing/selftests/sysctl/run_numerictests index e6e76c93d948..c375ce0f4c15 100755 --- a/tools/testing/selftests/sysctl/run_numerictests +++ b/tools/testing/selftests/sysctl/run_numerictests @@ -1,7 +1,7 @@ #!/bin/sh -SYSCTL="/proc/sys" -TARGET="${SYSCTL}/vm/swappiness" +SYSCTL="/proc/sys/debug/test_sysctl/" +TARGET="${SYSCTL}/int_0001" ORIG=$(cat "${TARGET}") TEST_STR=$(( $ORIG + 1 )) diff --git a/tools/testing/selftests/sysctl/run_stringtests b/tools/testing/selftests/sysctl/run_stringtests index 857ec667fb02..a6f2618afeaa 100755 --- a/tools/testing/selftests/sysctl/run_stringtests +++ b/tools/testing/selftests/sysctl/run_stringtests @@ -1,7 +1,7 @@ #!/bin/sh -SYSCTL="/proc/sys" -TARGET="${SYSCTL}/kernel/domainname" +SYSCTL="/proc/sys/debug/test_sysctl/" +TARGET="${SYSCTL}/string_0001" ORIG=$(cat "${TARGET}") TEST_STR="Testing sysctl" -- cgit v1.2.3-59-g8ed1b From 64b671204afd71591e774e7237b7c862ac5bbd97 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:46 -0700 Subject: test_sysctl: add generic script to expand on tests This adds a generic script to let us more easily add more tests cases. Since we really have only two types of tests cases just fold them into the one file. Each test unit is now identified into its separate function: # ./sysctl.sh -l Test ID list: TEST_ID x NUM_TEST TEST_ID: Test ID NUM_TESTS: Number of recommended times to run the test 0001 x 1 - tests proc_dointvec_minmax() 0002 x 1 - tests proc_dostring() For now we start off with what we had before, and run only each test once. We can now watch a test case until it fails: ./sysctl.sh -w 0002 We can also run a test case x number of times, say we want to run a test case 100 times: ./sysctl.sh -c 0001 100 To run a test case only once, for example: ./sysctl.sh -s 0002 The default settings are specified at the top of sysctl.sh. Link: http://lkml.kernel.org/r/20170630224431.17374-3-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Kees Cook Cc: "Eric W. Biederman" Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/selftests/sysctl/Makefile | 3 +- tools/testing/selftests/sysctl/common_tests | 131 ------- tools/testing/selftests/sysctl/run_numerictests | 10 - tools/testing/selftests/sysctl/run_stringtests | 77 ---- tools/testing/selftests/sysctl/sysctl.sh | 494 ++++++++++++++++++++++++ 5 files changed, 495 insertions(+), 220 deletions(-) delete mode 100644 tools/testing/selftests/sysctl/common_tests delete mode 100755 tools/testing/selftests/sysctl/run_numerictests delete mode 100755 tools/testing/selftests/sysctl/run_stringtests create mode 100644 tools/testing/selftests/sysctl/sysctl.sh diff --git a/tools/testing/selftests/sysctl/Makefile b/tools/testing/selftests/sysctl/Makefile index b3c33e071f10..95c320b354e8 100644 --- a/tools/testing/selftests/sysctl/Makefile +++ b/tools/testing/selftests/sysctl/Makefile @@ -4,8 +4,7 @@ # No binaries, but make sure arg-less "make" doesn't trigger "run_tests". 
all: -TEST_PROGS := run_numerictests run_stringtests -TEST_FILES := common_tests +TEST_PROGS := sysctl.sh include ../lib.mk diff --git a/tools/testing/selftests/sysctl/common_tests b/tools/testing/selftests/sysctl/common_tests deleted file mode 100644 index b6862322962f..000000000000 --- a/tools/testing/selftests/sysctl/common_tests +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/sh - -TEST_FILE=$(mktemp) - -echo "== Testing sysctl behavior against ${TARGET} ==" - -set_orig() -{ - echo "${ORIG}" > "${TARGET}" -} - -set_test() -{ - echo "${TEST_STR}" > "${TARGET}" -} - -verify() -{ - local seen - seen=$(cat "$1") - if [ "${seen}" != "${TEST_STR}" ]; then - return 1 - fi - return 0 -} - -exit_test() -{ - if [ ! -z ${old_strict} ]; then - echo ${old_strict} > ${WRITES_STRICT} - fi - exit $rc -} - -trap 'set_orig; rm -f "${TEST_FILE}"' EXIT - -rc=0 - -echo -n "Writing test file ... " -echo "${TEST_STR}" > "${TEST_FILE}" -if ! verify "${TEST_FILE}"; then - echo "FAIL" >&2 - exit 1 -else - echo "ok" -fi - -echo -n "Checking sysctl is not set to test value ... " -if verify "${TARGET}"; then - echo "FAIL" >&2 - exit 1 -else - echo "ok" -fi - -echo -n "Writing sysctl from shell ... " -set_test -if ! verify "${TARGET}"; then - echo "FAIL" >&2 - exit 1 -else - echo "ok" -fi - -echo -n "Resetting sysctl to original value ... " -set_orig -if verify "${TARGET}"; then - echo "FAIL" >&2 - exit 1 -else - echo "ok" -fi - -echo -n "Checking write strict setting ... " -WRITES_STRICT="${SYSCTL}/kernel/sysctl_writes_strict" -if [ ! -e ${WRITES_STRICT} ]; then - echo "FAIL, but skip in case of old kernel" >&2 -else - old_strict=$(cat ${WRITES_STRICT}) - if [ "$old_strict" = "1" ]; then - echo "ok" - else - echo "FAIL, strict value is 0 but force to 1 to continue" >&2 - echo "1" > ${WRITES_STRICT} - fi -fi - -# Now that we've validated the sanity of "set_test" and "set_orig", -# we can use those functions to set starting states before running -# specific behavioral tests. - -echo -n "Writing entire sysctl in single write ... " -set_orig -dd if="${TEST_FILE}" of="${TARGET}" bs=4096 2>/dev/null -if ! verify "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Writing middle of sysctl after synchronized seek ... " -set_test -dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 skip=1 2>/dev/null -if ! verify "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Writing beyond end of sysctl ... " -set_orig -dd if="${TEST_FILE}" of="${TARGET}" bs=20 seek=2 2>/dev/null -if verify "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Writing sysctl with multiple long writes ... " -set_orig -(perl -e 'print "A" x 50;'; echo "${TEST_STR}") | \ - dd of="${TARGET}" bs=50 2>/dev/null -if verify "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi diff --git a/tools/testing/selftests/sysctl/run_numerictests b/tools/testing/selftests/sysctl/run_numerictests deleted file mode 100755 index c375ce0f4c15..000000000000 --- a/tools/testing/selftests/sysctl/run_numerictests +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -SYSCTL="/proc/sys/debug/test_sysctl/" -TARGET="${SYSCTL}/int_0001" -ORIG=$(cat "${TARGET}") -TEST_STR=$(( $ORIG + 1 )) - -. 
./common_tests - -exit_test diff --git a/tools/testing/selftests/sysctl/run_stringtests b/tools/testing/selftests/sysctl/run_stringtests deleted file mode 100755 index a6f2618afeaa..000000000000 --- a/tools/testing/selftests/sysctl/run_stringtests +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/sh - -SYSCTL="/proc/sys/debug/test_sysctl/" -TARGET="${SYSCTL}/string_0001" -ORIG=$(cat "${TARGET}") -TEST_STR="Testing sysctl" - -. ./common_tests - -# Only string sysctls support seeking/appending. -MAXLEN=65 - -echo -n "Writing entire sysctl in short writes ... " -set_orig -dd if="${TEST_FILE}" of="${TARGET}" bs=1 2>/dev/null -if ! verify "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Writing middle of sysctl after unsynchronized seek ... " -set_test -dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 2>/dev/null -if verify "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Checking sysctl maxlen is at least $MAXLEN ... " -set_orig -perl -e 'print "A" x ('"${MAXLEN}"'-2), "B";' | \ - dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null -if ! grep -q B "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Checking sysctl keeps original string on overflow append ... " -set_orig -perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \ - dd of="${TARGET}" bs=$(( MAXLEN - 1 )) 2>/dev/null -if grep -q B "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Checking sysctl stays NULL terminated on write ... " -set_orig -perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \ - dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null -if grep -q B "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -echo -n "Checking sysctl stays NULL terminated on overwrite ... " -set_orig -perl -e 'print "A" x ('"${MAXLEN}"'-1), "BB";' | \ - dd of="${TARGET}" bs=$(( $MAXLEN + 1 )) 2>/dev/null -if grep -q B "${TARGET}"; then - echo "FAIL" >&2 - rc=1 -else - echo "ok" -fi - -exit_test diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh new file mode 100644 index 000000000000..cbe1345d7c1d --- /dev/null +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -0,0 +1,494 @@ +#!/bin/bash +# Copyright (C) 2017 Luis R. Rodriguez +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or at your option any +# later version; or, when distributed separately from the Linux kernel or +# when incorporated into other software packages, subject to the following +# license: +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of copyleft-next (version 0.3.1 or later) as published +# at http://copyleft-next.org/. + +# This performs a series of tests against the proc sysctl interface. + +TEST_NAME="sysctl" +TEST_DRIVER="test_${TEST_NAME}" +TEST_DIR=$(dirname $0) +TEST_FILE=$(mktemp) + +# This represents +# +# TEST_ID:TEST_COUNT:ENABLED +# +# TEST_ID: is the test id number +# TEST_COUNT: number of times we should run the test +# ENABLED: 1 if enabled, 0 otherwise +# +# Once these are enabled please leave them as-is. Write your own test, +# we have tons of space. +ALL_TESTS="0001:1:1" +ALL_TESTS="$ALL_TESTS 0002:1:1" + +test_modprobe() +{ + if [ ! 
-d $DIR ]; then + echo "$0: $DIR not present" >&2 + echo "You must have the following enabled in your kernel:" >&2 + cat $TEST_DIR/config >&2 + exit 1 + fi +} + +function allow_user_defaults() +{ + if [ -z $DIR ]; then + DIR="/sys/module/test_sysctl/" + fi + if [ -z $DEFAULT_NUM_TESTS ]; then + DEFAULT_NUM_TESTS=50 + fi + if [ -z $SYSCTL ]; then + SYSCTL="/proc/sys/debug/test_sysctl" + fi + if [ -z $PROD_SYSCTL ]; then + PROD_SYSCTL="/proc/sys" + fi + if [ -z $WRITES_STRICT ]; then + WRITES_STRICT="${PROD_SYSCTL}/kernel/sysctl_writes_strict" + fi +} + +function check_production_sysctl_writes_strict() +{ + echo -n "Checking production write strict setting ... " + if [ ! -e ${WRITES_STRICT} ]; then + echo "FAIL, but skip in case of old kernel" >&2 + else + old_strict=$(cat ${WRITES_STRICT}) + if [ "$old_strict" = "1" ]; then + echo "ok" + else + echo "FAIL, strict value is 0 but force to 1 to continue" >&2 + echo "1" > ${WRITES_STRICT} + fi + fi +} + +test_reqs() +{ + uid=$(id -u) + if [ $uid -ne 0 ]; then + echo $msg must be run as root >&2 + exit 0 + fi + + if ! which perl 2> /dev/null > /dev/null; then + echo "$0: You need perl installed" + exit 1 + fi +} + +function load_req_mod() +{ + trap "test_modprobe" EXIT + + if [ ! -d $DIR ]; then + modprobe $TEST_DRIVER + if [ $? -ne 0 ]; then + exit + fi + fi +} + +set_orig() +{ + if [ ! -z $TARGET ]; then + echo "${ORIG}" > "${TARGET}" + fi +} + +set_test() +{ + echo "${TEST_STR}" > "${TARGET}" +} + +verify() +{ + local seen + seen=$(cat "$1") + if [ "${seen}" != "${TEST_STR}" ]; then + return 1 + fi + return 0 +} + +test_rc() +{ + if [[ $rc != 0 ]]; then + echo "Failed test, return value: $rc" >&2 + exit $rc + fi +} + +test_finish() +{ + set_orig + rm -f "${TEST_FILE}" + + if [ ! -z ${old_strict} ]; then + echo ${old_strict} > ${WRITES_STRICT} + fi + exit $rc +} + +run_numerictests() +{ + echo "== Testing sysctl behavior against ${TARGET} ==" + + rc=0 + + echo -n "Writing test file ... " + echo "${TEST_STR}" > "${TEST_FILE}" + if ! verify "${TEST_FILE}"; then + echo "FAIL" >&2 + exit 1 + else + echo "ok" + fi + + echo -n "Checking sysctl is not set to test value ... " + if verify "${TARGET}"; then + echo "FAIL" >&2 + exit 1 + else + echo "ok" + fi + + echo -n "Writing sysctl from shell ... " + set_test + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + exit 1 + else + echo "ok" + fi + + echo -n "Resetting sysctl to original value ... " + set_orig + if verify "${TARGET}"; then + echo "FAIL" >&2 + exit 1 + else + echo "ok" + fi + + # Now that we've validated the sanity of "set_test" and "set_orig", + # we can use those functions to set starting states before running + # specific behavioral tests. + + echo -n "Writing entire sysctl in single write ... " + set_orig + dd if="${TEST_FILE}" of="${TARGET}" bs=4096 2>/dev/null + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Writing middle of sysctl after synchronized seek ... " + set_test + dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 skip=1 2>/dev/null + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Writing beyond end of sysctl ... " + set_orig + dd if="${TEST_FILE}" of="${TARGET}" bs=20 seek=2 2>/dev/null + if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Writing sysctl with multiple long writes ... 
" + set_orig + (perl -e 'print "A" x 50;'; echo "${TEST_STR}") | \ + dd of="${TARGET}" bs=50 2>/dev/null + if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + test_rc +} + +run_stringtests() +{ + echo -n "Writing entire sysctl in short writes ... " + set_orig + dd if="${TEST_FILE}" of="${TARGET}" bs=1 2>/dev/null + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Writing middle of sysctl after unsynchronized seek ... " + set_test + dd if="${TEST_FILE}" of="${TARGET}" bs=1 seek=1 2>/dev/null + if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Checking sysctl maxlen is at least $MAXLEN ... " + set_orig + perl -e 'print "A" x ('"${MAXLEN}"'-2), "B";' | \ + dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null + if ! grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Checking sysctl keeps original string on overflow append ... " + set_orig + perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \ + dd of="${TARGET}" bs=$(( MAXLEN - 1 )) 2>/dev/null + if grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Checking sysctl stays NULL terminated on write ... " + set_orig + perl -e 'print "A" x ('"${MAXLEN}"'-1), "B";' | \ + dd of="${TARGET}" bs="${MAXLEN}" 2>/dev/null + if grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + echo -n "Checking sysctl stays NULL terminated on overwrite ... " + set_orig + perl -e 'print "A" x ('"${MAXLEN}"'-1), "BB";' | \ + dd of="${TARGET}" bs=$(( $MAXLEN + 1 )) 2>/dev/null + if grep -q B "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + + test_rc +} + +sysctl_test_0001() +{ + TARGET="${SYSCTL}/int_0001" + ORIG=$(cat "${TARGET}") + TEST_STR=$(( $ORIG + 1 )) + + run_numerictests +} + +sysctl_test_0002() +{ + TARGET="${SYSCTL}/string_0001" + ORIG=$(cat "${TARGET}") + TEST_STR="Testing sysctl" + # Only string sysctls support seeking/appending. + MAXLEN=65 + + run_numerictests + run_stringtests +} + +list_tests() +{ + echo "Test ID list:" + echo + echo "TEST_ID x NUM_TEST" + echo "TEST_ID: Test ID" + echo "NUM_TESTS: Number of recommended times to run the test" + echo + echo "0001 x $(get_test_count 0001) - tests proc_dointvec_minmax()" + echo "0002 x $(get_test_count 0002) - tests proc_dostring()" +} + +test_reqs + +usage() +{ + NUM_TESTS=$(grep -o ' ' <<<"$ALL_TESTS" | grep -c .) + let NUM_TESTS=$NUM_TESTS+1 + MAX_TEST=$(printf "%04d\n" $NUM_TESTS) + echo "Usage: $0 [ -t <4-number-digit> ] | [ -w <4-number-digit> ] |" + echo " [ -s <4-number-digit> ] | [ -c <4-number-digit> " + echo " [ all ] [ -h | --help ] [ -l ]" + echo "" + echo "Valid tests: 0001-$MAX_TEST" + echo "" + echo " all Runs all tests (default)" + echo " -t Run test ID the number amount of times is recommended" + echo " -w Watch test ID run until it runs into an error" + echo " -c Run test ID once" + echo " -s Run test ID x test-count number of times" + echo " -l List all test ID list" + echo " -h|--help Help" + echo + echo "If an error every occurs execution will immediately terminate." + echo "If you are adding a new test try using -w first to" + echo "make sure the test passes a series of tests." 
+ echo + echo Example uses: + echo + echo "$TEST_NAME.sh -- executes all tests" + echo "$TEST_NAME.sh -t 0002 -- Executes test ID 0002 the recommended number of times" + echo "$TEST_NAME.sh -w 0002 -- Watch test ID 0002 run until an error occurs" + echo "$TEST_NAME.sh -s 0002 -- Run test ID 0002 once" + echo "$TEST_NAME.sh -c 0002 3 -- Run test ID 0002 three times" + echo + list_tests + exit 1 +} + +function test_num() +{ + re='^[0-9]+$' + if ! [[ $1 =~ $re ]]; then + usage + fi +} + +function get_test_count() +{ + test_num $1 + TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + LAST_TWO=${TEST_DATA#*:*} + echo ${LAST_TWO%:*} +} + +function get_test_enabled() +{ + test_num $1 + TEST_DATA=$(echo $ALL_TESTS | awk '{print $'$1'}') + echo ${TEST_DATA#*:*:} +} + +function run_all_tests() +{ + for i in $ALL_TESTS ; do + TEST_ID=${i%:*:*} + ENABLED=$(get_test_enabled $TEST_ID) + TEST_COUNT=$(get_test_count $TEST_ID) + if [[ $ENABLED -eq "1" ]]; then + test_case $TEST_ID $TEST_COUNT + fi + done +} + +function watch_log() +{ + if [ $# -ne 3 ]; then + clear + fi + date + echo "Running test: $2 - run #$1" +} + +function watch_case() +{ + i=0 + while [ 1 ]; do + + if [ $# -eq 1 ]; then + test_num $1 + watch_log $i ${TEST_NAME}_test_$1 + ${TEST_NAME}_test_$1 + else + watch_log $i all + run_all_tests + fi + let i=$i+1 + done +} + +function test_case() +{ + NUM_TESTS=$DEFAULT_NUM_TESTS + if [ $# -eq 2 ]; then + NUM_TESTS=$2 + fi + + i=0 + while [ $i -lt $NUM_TESTS ]; do + test_num $1 + watch_log $i ${TEST_NAME}_test_$1 noclear + RUN_TEST=${TEST_NAME}_test_$1 + $RUN_TEST + let i=$i+1 + done +} + +function parse_args() +{ + if [ $# -eq 0 ]; then + run_all_tests + else + if [[ "$1" = "all" ]]; then + run_all_tests + elif [[ "$1" = "-w" ]]; then + shift + watch_case $@ + elif [[ "$1" = "-t" ]]; then + shift + test_num $1 + test_case $1 $(get_test_count $1) + elif [[ "$1" = "-c" ]]; then + shift + test_num $1 + test_num $2 + test_case $1 $2 + elif [[ "$1" = "-s" ]]; then + shift + test_case $1 1 + elif [[ "$1" = "-l" ]]; then + list_tests + elif [[ "$1" = "-h" || "$1" = "--help" ]]; then + usage + else + usage + fi + fi +} + +test_reqs +allow_user_defaults +check_production_sysctl_writes_strict +load_req_mod + +trap "test_finish" EXIT + +parse_args $@ + +exit 0 -- cgit v1.2.3-59-g8ed1b From 1c0357c846452add7c2c863ec372010e3d2ca943 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:49 -0700 Subject: test_sysctl: test against PAGE_SIZE for int Add the following tests to ensure we do not regress: o Test using a buffer full of space (PAGE_SIZE-1) followed by a single digit works o Test using a buffer full of spaces (PAGE_SIZE or over) will fail As tests increase, instead of unloading and reloading the module we can just do a shell reset_vals() which resets values to those we know are set at init time in the driver. Link: http://lkml.kernel.org/r/20170630224431.17374-4-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Kees Cook Cc: "Eric W. 
Biederman" Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/testing/selftests/sysctl/sysctl.sh | 66 ++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index cbe1345d7c1d..6ec807576f7c 100644 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -75,6 +75,13 @@ function check_production_sysctl_writes_strict() echo "1" > ${WRITES_STRICT} fi fi + + if [ -z $PAGE_SIZE ]; then + PAGE_SIZE=$(getconf PAGESIZE) + fi + if [ -z $MAX_DIGITS ]; then + MAX_DIGITS=$(($PAGE_SIZE/8)) + fi } test_reqs() @@ -89,6 +96,10 @@ test_reqs() echo "$0: You need perl installed" exit 1 fi + if ! which getconf 2> /dev/null > /dev/null; then + echo "$0: You need getconf installed" + exit 1 + fi } function load_req_mod() @@ -103,6 +114,23 @@ function load_req_mod() fi } +reset_vals() +{ + VAL="" + TRIGGER=$(basename ${TARGET}) + case "$TRIGGER" in + int_0001) + VAL="60" + ;; + string_0001) + VAL="(none)" + ;; + *) + ;; + esac + echo -n $VAL > $TARGET +} + set_orig() { if [ ! -z $TARGET ]; then @@ -229,7 +257,42 @@ run_numerictests() else echo "ok" fi + test_rc +} + +# Your test must accept digits 3 and 4 to use this +run_limit_digit() +{ + echo -n "Checking ignoring spaces up to PAGE_SIZE works on write ..." + reset_vals + LIMIT=$((MAX_DIGITS -1)) + TEST_STR="3" + (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \ + dd of="${TARGET}" 2>/dev/null + + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Checking passing PAGE_SIZE of spaces fails on write ..." + reset_vals + + LIMIT=$((MAX_DIGITS)) + TEST_STR="4" + (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \ + dd of="${TARGET}" 2>/dev/null + + if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi test_rc } @@ -305,15 +368,18 @@ run_stringtests() sysctl_test_0001() { TARGET="${SYSCTL}/int_0001" + reset_vals ORIG=$(cat "${TARGET}") TEST_STR=$(( $ORIG + 1 )) run_numerictests + run_limit_digit } sysctl_test_0002() { TARGET="${SYSCTL}/string_0001" + reset_vals ORIG=$(cat "${TARGET}") TEST_STR="Testing sysctl" # Only string sysctls support seeking/appending. -- cgit v1.2.3-59-g8ed1b From eb965eda1cabf26e62afc07a06cdf2fd5aaa2906 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:52 -0700 Subject: test_sysctl: add simple proc_dointvec() case Test against a simple proc_dointvec() case. While at it, add a test against INT_MAX. Make sure INT_MAX works, and INT_MAX+1 will fail. Also test negative values work. Link: http://lkml.kernel.org/r/20170630224431.17374-5-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Kees Cook Cc: "Eric W. 
Biederman" Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_sysctl.c | 11 ++++++ tools/testing/selftests/sysctl/sysctl.sh | 62 ++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index b2163bfb6eb2..1472e1ae4931 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -41,11 +41,15 @@ static int i_one_hundred = 100; struct test_sysctl_data { int int_0001; + int int_0002; + char string_0001[65]; }; static struct test_sysctl_data test_data = { .int_0001 = 60, + .int_0002 = 1, + .string_0001 = "(none)", }; @@ -60,6 +64,13 @@ static struct ctl_table test_table[] = { .extra1 = &i_zero, .extra2 = &i_one_hundred, }, + { + .procname = "int_0002", + .data = &test_data.int_0002, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "string_0001", .data = &test_data.string_0001, diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index 6ec807576f7c..7ba3fa2bbd54 100644 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -31,6 +31,7 @@ TEST_FILE=$(mktemp) # we have tons of space. ALL_TESTS="0001:1:1" ALL_TESTS="$ALL_TESTS 0002:1:1" +ALL_TESTS="$ALL_TESTS 0003:1:1" test_modprobe() { @@ -82,6 +83,9 @@ function check_production_sysctl_writes_strict() if [ -z $MAX_DIGITS ]; then MAX_DIGITS=$(($PAGE_SIZE/8)) fi + if [ -z $INT_MAX ]; then + INT_MAX=$(getconf INT_MAX) + fi } test_reqs() @@ -122,6 +126,9 @@ reset_vals() int_0001) VAL="60" ;; + int_0002) + VAL="1" + ;; string_0001) VAL="(none)" ;; @@ -296,6 +303,48 @@ run_limit_digit() test_rc } +# You are using an int +run_limit_digit_int() +{ + echo -n "Testing INT_MAX works ..." + reset_vals + TEST_STR="$INT_MAX" + echo -n $TEST_STR > $TARGET + + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Testing INT_MAX + 1 will fail as expected..." + reset_vals + let TEST_STR=$INT_MAX+1 + echo -n $TEST_STR > $TARGET 2> /dev/null + + if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Testing negative values will work as expected..." + reset_vals + TEST_STR="-3" + echo -n $TEST_STR > $TARGET 2> /dev/null + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc +} + run_stringtests() { echo -n "Writing entire sysctl in short writes ... " @@ -389,6 +438,18 @@ sysctl_test_0002() run_stringtests } +sysctl_test_0003() +{ + TARGET="${SYSCTL}/int_0002" + reset_vals + ORIG=$(cat "${TARGET}") + TEST_STR=$(( $ORIG + 1 )) + + run_numerictests + run_limit_digit + run_limit_digit_int +} + list_tests() { echo "Test ID list:" @@ -399,6 +460,7 @@ list_tests() echo echo "0001 x $(get_test_count 0001) - tests proc_dointvec_minmax()" echo "0002 x $(get_test_count 0002) - tests proc_dostring()" + echo "0003 x $(get_test_count 0003) - tests proc_dointvec()" } test_reqs -- cgit v1.2.3-59-g8ed1b From 2920fad3a5d394b66011c7f35c7b05278354055e Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Wed, 12 Jul 2017 14:33:55 -0700 Subject: test_sysctl: add simple proc_douintvec() case Test against a simple proc_douintvec() case. While at it, add a test against UINT_MAX. Make sure UINT_MAX works, and UINT_MAX+1 will fail and that negative values are not accepted. Link: http://lkml.kernel.org/r/20170630224431.17374-6-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Kees Cook Cc: "Eric W. 
Biederman" Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_sysctl.c | 11 ++++++ tools/testing/selftests/sysctl/sysctl.sh | 63 ++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index 1472e1ae4931..53db3513ab08 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -43,6 +43,8 @@ struct test_sysctl_data { int int_0001; int int_0002; + unsigned int uint_0001; + char string_0001[65]; }; @@ -50,6 +52,8 @@ static struct test_sysctl_data test_data = { .int_0001 = 60, .int_0002 = 1, + .uint_0001 = 314, + .string_0001 = "(none)", }; @@ -71,6 +75,13 @@ static struct ctl_table test_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "uint_0001", + .data = &test_data.uint_0001, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_douintvec, + }, { .procname = "string_0001", .data = &test_data.string_0001, diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index 7ba3fa2bbd54..abeef675a884 100644 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -32,6 +32,7 @@ TEST_FILE=$(mktemp) ALL_TESTS="0001:1:1" ALL_TESTS="$ALL_TESTS 0002:1:1" ALL_TESTS="$ALL_TESTS 0003:1:1" +ALL_TESTS="$ALL_TESTS 0004:1:1" test_modprobe() { @@ -86,6 +87,9 @@ function check_production_sysctl_writes_strict() if [ -z $INT_MAX ]; then INT_MAX=$(getconf INT_MAX) fi + if [ -z $UINT_MAX ]; then + UINT_MAX=$(getconf UINT_MAX) + fi } test_reqs() @@ -129,6 +133,9 @@ reset_vals() int_0002) VAL="1" ;; + uint_0001) + VAL="314" + ;; string_0001) VAL="(none)" ;; @@ -345,6 +352,49 @@ run_limit_digit_int() test_rc } +# You are using an unsigned int +run_limit_digit_uint() +{ + echo -n "Testing UINT_MAX works ..." + reset_vals + TEST_STR="$UINT_MAX" + echo -n $TEST_STR > $TARGET + + if ! verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Testing UINT_MAX + 1 will fail as expected..." + reset_vals + TEST_STR=$(($UINT_MAX+1)) + echo -n $TEST_STR > $TARGET 2> /dev/null + + if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Testing negative values will not work as expected ..." + reset_vals + TEST_STR="-3" + echo -n $TEST_STR > $TARGET 2> /dev/null + + if verify "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc +} + run_stringtests() { echo -n "Writing entire sysctl in short writes ... " @@ -450,6 +500,18 @@ sysctl_test_0003() run_limit_digit_int } +sysctl_test_0004() +{ + TARGET="${SYSCTL}/uint_0001" + reset_vals + ORIG=$(cat "${TARGET}") + TEST_STR=$(( $ORIG + 1 )) + + run_numerictests + run_limit_digit + run_limit_digit_uint +} + list_tests() { echo "Test ID list:" @@ -461,6 +523,7 @@ list_tests() echo "0001 x $(get_test_count 0001) - tests proc_dointvec_minmax()" echo "0002 x $(get_test_count 0002) - tests proc_dostring()" echo "0003 x $(get_test_count 0003) - tests proc_dointvec()" + echo "0004 x $(get_test_count 0004) - tests proc_douintvec()" } test_reqs -- cgit v1.2.3-59-g8ed1b From 7c43a657a4beadeb6d2fe1a00732261e313a807f Mon Sep 17 00:00:00 2001 From: "Luis R. 
Rodriguez" Date: Wed, 12 Jul 2017 14:33:58 -0700 Subject: test_sysctl: test against int proc_dointvec() array support Add a few initial respective tests for an array: o Echoing values separated by spaces works o Echoing only first elements will set first elements o Confirm PAGE_SIZE limit still applies even if an array is used Link: http://lkml.kernel.org/r/20170630224431.17374-7-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Kees Cook Cc: "Eric W. Biederman" Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/test_sysctl.c | 13 +++++ tools/testing/selftests/sysctl/sysctl.sh | 89 ++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index 53db3513ab08..3dd801c1c85b 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -42,6 +42,7 @@ static int i_one_hundred = 100; struct test_sysctl_data { int int_0001; int int_0002; + int int_0003[4]; unsigned int uint_0001; @@ -52,6 +53,11 @@ static struct test_sysctl_data test_data = { .int_0001 = 60, .int_0002 = 1, + .int_0003[0] = 0, + .int_0003[1] = 1, + .int_0003[2] = 2, + .int_0003[3] = 3, + .uint_0001 = 314, .string_0001 = "(none)", @@ -75,6 +81,13 @@ static struct ctl_table test_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "int_0003", + .data = &test_data.int_0003, + .maxlen = sizeof(test_data.int_0003), + .mode = 0644, + .proc_handler = proc_dointvec, + }, { .procname = "uint_0001", .data = &test_data.uint_0001, diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh index abeef675a884..ec232c3cfcaa 100644 --- a/tools/testing/selftests/sysctl/sysctl.sh +++ b/tools/testing/selftests/sysctl/sysctl.sh @@ -33,6 +33,7 @@ ALL_TESTS="0001:1:1" ALL_TESTS="$ALL_TESTS 0002:1:1" ALL_TESTS="$ALL_TESTS 0003:1:1" ALL_TESTS="$ALL_TESTS 0004:1:1" +ALL_TESTS="$ALL_TESTS 0005:3:1" test_modprobe() { @@ -108,6 +109,10 @@ test_reqs() echo "$0: You need getconf installed" exit 1 fi + if ! which diff 2> /dev/null > /dev/null; then + echo "$0: You need diff installed" + exit 1 + fi } function load_req_mod() @@ -167,6 +172,12 @@ verify() return 0 } +verify_diff_w() +{ + echo "$TEST_STR" | diff -q -w -u - $1 + return $? +} + test_rc() { if [[ $rc != 0 ]]; then @@ -352,6 +363,74 @@ run_limit_digit_int() test_rc } +# You used an int array +run_limit_digit_int_array() +{ + echo -n "Testing array works as expected ... " + TEST_STR="4 3 2 1" + echo -n $TEST_STR > $TARGET + + if ! verify_diff_w "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Testing skipping trailing array elements works ... " + # Do not reset_vals, carry on the values from the last test. + # If we only echo in two digits the last two are left intact + TEST_STR="100 101" + echo -n $TEST_STR > $TARGET + # After we echo in, to help diff we need to set on TEST_STR what + # we expect the result to be. + TEST_STR="100 101 2 1" + + if ! verify_diff_w "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Testing PAGE_SIZE limit on array works ... " + # Do not reset_vals, carry on the values from the last test. + # Even if you use an int array, you are still restricted to + # MAX_DIGITS, this is a known limitation. Test limit works. + LIMIT=$((MAX_DIGITS -1)) + TEST_STR="9" + (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \ + dd of="${TARGET}" 2>/dev/null + + TEST_STR="9 101 2 1" + if ! 
verify_diff_w "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc + + echo -n "Testing exceeding PAGE_SIZE limit fails as expected ... " + # Do not reset_vals, carry on the values from the last test. + # Now go over limit. + LIMIT=$((MAX_DIGITS)) + TEST_STR="7" + (perl -e 'print " " x '$LIMIT';'; echo "${TEST_STR}") | \ + dd of="${TARGET}" 2>/dev/null + + TEST_STR="7 101 2 1" + if verify_diff_w "${TARGET}"; then + echo "FAIL" >&2 + rc=1 + else + echo "ok" + fi + test_rc +} + # You are using an unsigned int run_limit_digit_uint() { @@ -512,6 +591,15 @@ sysctl_test_0004() run_limit_digit_uint } +sysctl_test_0005() +{ + TARGET="${SYSCTL}/int_0003" + reset_vals + ORIG=$(cat "${TARGET}") + + run_limit_digit_int_array +} + list_tests() { echo "Test ID list:" @@ -524,6 +612,7 @@ list_tests() echo "0002 x $(get_test_count 0002) - tests proc_dostring()" echo "0003 x $(get_test_count 0003) - tests proc_dointvec()" echo "0004 x $(get_test_count 0004) - tests proc_douintvec()" + echo "0005 x $(get_test_count 0005) - tests proc_dointvec() array" } test_reqs -- cgit v1.2.3-59-g8ed1b From 9380fa60b10ebd6ee7c3fcdb2cf162f4d7cf9fc5 Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Wed, 12 Jul 2017 14:34:01 -0700 Subject: kernel/sysctl_binary.c: check name array length in deprecated_sysctl_warning() Prevent use of uninitialized memory (originating from the stack frame of do_sysctl()) by verifying that the name array is filled with sufficient input data before comparing its specific entries with integer constants. Through timing measurement or analyzing the kernel debug logs, a user-mode program could potentially infer the results of comparisons against the uninitialized memory, and acquire some (very limited) information about the state of the kernel stack. The change also eliminates possible future warnings by tools such as KMSAN and other code checkers / instrumentations. Link: http://lkml.kernel.org/r/20170524122139.21333-1-mjurczyk@google.com Signed-off-by: Mateusz Jurczyk Acked-by: Kees Cook Cc: "David S. Miller" Cc: Matthew Whitehead Cc: "Eric W. Biederman" Cc: Tetsuo Handa Cc: Alexander Potapenko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl_binary.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 939a158eab11..02e1859f2ca8 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -1346,7 +1346,7 @@ static void deprecated_sysctl_warning(const int *name, int nlen) * CTL_KERN/KERN_VERSION is used by older glibc and cannot * ever go away. */ - if (name[0] == CTL_KERN && name[1] == KERN_VERSION) + if (nlen >= 2 && name[0] == CTL_KERN && name[1] == KERN_VERSION) return; if (printk_ratelimit()) { -- cgit v1.2.3-59-g8ed1b From ee7998c50c2697737c6530431709f77c852bf0d6 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:34:04 -0700 Subject: random: do not ignore early device randomness The add_device_randomness() function would ignore incoming bytes if the crng wasn't ready. This patch additionally makes sure to call add_latent_entropy() early enough to influence the initial stack canary, which is especially important on non-x86 systems where it stays the same through the life of the boot. 
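(To illustrate the call site this change benefits, here is a hypothetical driver snippet — not from any patch in this series — feeding per-device bytes into the pool at probe time; with this change such early bytes now reach crng_fast_load() instead of being dropped:)

-------------------------------------------------------
#include <linux/random.h>

/* Hypothetical probe routine for a device with a unique MAC address. */
static int example_probe(void)
{
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/*
	 * Before this patch these bytes were silently ignored whenever
	 * the crng was not yet ready; now they help seed it early.
	 */
	add_device_randomness(mac, sizeof(mac));
	return 0;
}
-------------------------------------------------------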
Link: http://lkml.kernel.org/r/20170626233038.GA48751@beast Signed-off-by: Kees Cook Cc: "Theodore Ts'o" Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: Ingo Molnar Cc: Jessica Yu Cc: Steven Rostedt (VMware) Cc: Viresh Kumar Cc: Tejun Heo Cc: Prarit Bhargava Cc: Lokesh Vutla Cc: Nicholas Piggin Cc: AKASHI Takahiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/random.c | 5 +++++ init/main.c | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/char/random.c b/drivers/char/random.c index 01a260f67437..23cab7a8c1c1 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -987,6 +987,11 @@ void add_device_randomness(const void *buf, unsigned int size) unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; + if (!crng_ready()) { + crng_fast_load(buf, size); + return; + } + trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&input_pool, buf, size); diff --git a/init/main.c b/init/main.c index df58a416dd1d..052481fbe363 100644 --- a/init/main.c +++ b/init/main.c @@ -518,6 +518,7 @@ asmlinkage __visible void __init start_kernel(void) /* * Set up the initial canary ASAP: */ + add_latent_entropy(); boot_init_stack_canary(); cgroup_init_early(); -- cgit v1.2.3-59-g8ed1b From 5f9f48f5b385457426b4b8f3f4a604019a8e7350 Mon Sep 17 00:00:00 2001 From: Rakesh Pandit Date: Wed, 12 Jul 2017 14:34:07 -0700 Subject: bfs: fix sanity checks for empty files Mount fails if the file system image has empty files, because of a sanity check while reading the superblock. For empty files the disk offset to the end of the file (i_eoffset) is cpu_to_le32(-1). The sanity-check comparison, which compares the disk offset with the file system size, isn't valid for this value and hence is skipped for it with this patch. Steps to reproduce: $ dd if=/dev/zero of=bfs-image count=204800 $ mkfs.bfs bfs-image $ mkdir bfs-mount-point $ sudo mount -t bfs -o loop bfs-image bfs-mount-point/ $ cd bfs-mount-point/ $ sudo touch a $ cd .. $ sudo umount bfs-mount-point/ $ sudo mount -t bfs -o loop bfs-image bfs-mount-point/ mount: /dev/loop0: can't read superblock $ dmesg [25526.689580] BFS-fs: bfs_fill_super(): Inode 0x00000003 corrupted Tigran said: "If you had created the filesystem with the proper mkfs under SCO UnixWare 7 you (probably) wouldn't encounter this issue. But since commercial Unix-es are now part of history and the only proper way is the Linux mkfs.bfs utility, your patch is fine" Link: http://lkml.kernel.org/r/20170505201625.GA3097@hercules.tuxera.com Signed-off-by: Rakesh Pandit Acked-by: Tigran Aivazian Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/bfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 25e312cb6071..9a69392f1fb3 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -419,7 +419,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) if (i_sblock > info->si_blocks || i_eblock > info->si_blocks || i_sblock > i_eblock || - i_eoff > s_size || + (i_eoff != le32_to_cpu(-1) && i_eoff > s_size) || i_sblock * BFS_BSIZE > i_eoff) { printf("Inode 0x%08x corrupted\n", i); -- cgit v1.2.3-59-g8ed1b From 59224ac1cf9cacf76a82266854921ca6c9887f20 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 12 Jul 2017 14:34:10 -0700 Subject: fs/Kconfig: kill CONFIG_PERCPU_RWSEM some more As of commit bf3eac84c42d ("percpu-rwsem: kill CONFIG_PERCPU_RWSEM") we unconditionally build pcpu-rwsems. Remove a leftover select for FILE_LOCKING. 
Link: http://lkml.kernel.org/r/20170518180115.2794-1-dave@stgolabs.net Signed-off-by: Davidlohr Bueso Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/Kconfig b/fs/Kconfig index b0e42b6a96b9..7aee6d699fd6 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -80,7 +80,6 @@ config EXPORTFS_BLOCK_OPS config FILE_LOCKING bool "Enable POSIX file locking API" if EXPERT default y - select PERCPU_RWSEM help This option enables standard file locking support, required for filesystems like NFS and for the flock() system -- cgit v1.2.3-59-g8ed1b From 821f74402a4c67de63cb6bab5bae7c7a3b298ac2 Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Wed, 12 Jul 2017 14:34:13 -0700 Subject: scripts/gdb: add lx-fdtdump command lx-fdtdump dumps the flattened device tree passed to the kernel from the bootloader to the filename specified as the command argument. If no argument is provided it defaults to fdtdump.dtb. This then allows further post processing on the machine running GDB. The fdt header is also printed in the GDB console. For example: (gdb) lx-fdtdump fdt_magic: 0xD00DFEED fdt_totalsize: 0xC108 off_dt_struct: 0x38 off_dt_strings: 0x3804 off_mem_rsvmap: 0x28 version: 17 last_comp_version: 16 Dumped fdt to fdtdump.dtb >fdtdump fdtdump.dtb | less This command is useful as the bootloader can often re-write parts of the device tree, and this can sometimes cause the kernel to not boot. Link: http://lkml.kernel.org/r/1481280065-5336-2-git-send-email-kbingham@kernel.org Signed-off-by: Peter Griffin Signed-off-by: Kieran Bingham Cc: Jason Wessel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/gdb/linux/constants.py.in | 7 ++++ scripts/gdb/linux/proc.py | 73 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in index 7986f4e0da12..7aad82406422 100644 --- a/scripts/gdb/linux/constants.py.in +++ b/scripts/gdb/linux/constants.py.in @@ -14,6 +14,7 @@ #include #include +#include /* We need to stringify expanded macros so that they can be parsed */ @@ -50,3 +51,9 @@ LX_VALUE(MNT_NOEXEC) LX_VALUE(MNT_NOATIME) LX_VALUE(MNT_NODIRATIME) LX_VALUE(MNT_RELATIME) + +/* linux/of_fdt.h> */ +LX_VALUE(OF_DT_HEADER) + +/* Kernel Configs */ +LX_CONFIG(CONFIG_OF) diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py index 38b1f09d1cd9..086d27223c0c 100644 --- a/scripts/gdb/linux/proc.py +++ b/scripts/gdb/linux/proc.py @@ -16,6 +16,7 @@ from linux import constants from linux import utils from linux import tasks from linux import lists +from struct import * class LxCmdLine(gdb.Command): @@ -195,3 +196,75 @@ values of that process namespace""" info_opts(MNT_INFO, m_flags))) LxMounts() + + +class LxFdtDump(gdb.Command): + """Output Flattened Device Tree header and dump FDT blob to the filename + specified as the command argument. Equivalent to + 'cat /proc/fdt > fdtdump.dtb' on a running target""" + + def __init__(self): + super(LxFdtDump, self).__init__("lx-fdtdump", gdb.COMMAND_DATA, + gdb.COMPLETE_FILENAME) + + def fdthdr_to_cpu(self, fdt_header): + + fdt_header_be = ">IIIIIII" + fdt_header_le = " Date: Wed, 12 Jul 2017 14:34:16 -0700 Subject: scripts/gdb: lx-dmesg: cast log_buf to void* for addr fetch In some cases it is possible for the str() conversion here to throw encoding errors because log_buf might not point to valid ascii. 
For example: (gdb) python print str(gdb.parse_and_eval("log_buf")) Traceback (most recent call last): File "", line 1, in UnicodeEncodeError: 'ascii' codec can't encode character u'\u0303' in position 24: ordinal not in range(128) Avoid this by explicitly casting to (void *) inside the gdb expression. Link: http://lkml.kernel.org/r/ba6f85dbb02ca980ebd0e2399b0649423399b565.1498481469.git.leonard.crestez@nxp.com Signed-off-by: Leonard Crestez Reviewed-by: Jan Kiszka Cc: Jason Wessel Cc: Kieran Bingham Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/gdb/linux/dmesg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py index 5afd1098e33a..f5a030333dfd 100644 --- a/scripts/gdb/linux/dmesg.py +++ b/scripts/gdb/linux/dmesg.py @@ -24,7 +24,7 @@ class LxDmesg(gdb.Command): def invoke(self, arg, from_tty): log_buf_addr = int(str(gdb.parse_and_eval( - "'printk.c'::log_buf")).split()[0], 16) + "(void *)'printk.c'::log_buf")).split()[0], 16) log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx")) log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx")) log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len")) -- cgit v1.2.3-59-g8ed1b From 46d10a094353c05144f3b0530516bdac3ce7c435 Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Wed, 12 Jul 2017 14:34:19 -0700 Subject: scripts/gdb: lx-dmesg: use explicit encoding=utf8 errors=replace Use errors=replace because it is never desirable for lx-dmesg to fail on string decoding errors, not even if the log buffer is corrupt and we show incorrect info. The kernel will sometimes print utf8, for example the copyright symbol from jffs2. In order to make this work specify 'utf8' everywhere because python2 otherwise defaults to 'ascii'. In theory the second errors='replace' should not be required because everything that can be decoded as utf8 should also be encodable back to utf8. But it's better to be extra safe here. It's worth noting that this is definitely not true for encoding='ascii': unknown characters are replaced with U+FFFD REPLACEMENT CHARACTER and they fail to encode back to ascii. Link: http://lkml.kernel.org/r/acee067f3345954ed41efb77b80eebdc038619c6.1498481469.git.leonard.crestez@nxp.com Signed-off-by: Leonard Crestez Acked-by: Jan Kiszka Cc: Jason Wessel Cc: Kieran Bingham Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/gdb/linux/dmesg.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py index f5a030333dfd..6d2e09a2ad2f 100644 --- a/scripts/gdb/linux/dmesg.py +++ b/scripts/gdb/linux/dmesg.py @@ -12,6 +12,7 @@ # import gdb +import sys from linux import utils @@ -52,13 +53,19 @@ class LxDmesg(gdb.Command): continue text_len = utils.read_u16(log_buf[pos + 10:pos + 12]) - text = log_buf[pos + 16:pos + 16 + text_len].decode() + text = log_buf[pos + 16:pos + 16 + text_len].decode( + encoding='utf8', errors='replace') time_stamp = utils.read_u64(log_buf[pos:pos + 8]) for line in text.splitlines(): - gdb.write("[{time:12.6f}] {line}\n".format( + msg = u"[{time:12.6f}] {line}\n".format( time=time_stamp / 1000000000.0, - line=line)) + line=line) + # With python2 gdb.write will attempt to convert unicode to + # ascii and might fail so pass an utf8-encoded str instead. 
+ if sys.hexversion < 0x03000000: + msg = msg.encode(encoding='utf8', errors='replace') + gdb.write(msg) pos += length -- cgit v1.2.3-59-g8ed1b From 9263969a46fc899092ba4f8c4206fa2340c9a64e Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 12 Jul 2017 14:34:22 -0700 Subject: kfifo: clean up example to not use page_link This is a layering violation so we replace the uses with calls to sg_page(). This is a prep patch for replacing page_link and this is one of the very few uses outside of scatterlist.h. Link: http://lkml.kernel.org/r/1495663199-22234-1-git-send-email-logang@deltatee.com Signed-off-by: Logan Gunthorpe Signed-off-by: Stephen Bates Acked-by: Stefani Seibold Cc: Stefani Seibold Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- samples/kfifo/dma-example.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/kfifo/dma-example.c b/samples/kfifo/dma-example.c index aa243db93f01..be0d4a5fdf53 100644 --- a/samples/kfifo/dma-example.c +++ b/samples/kfifo/dma-example.c @@ -75,8 +75,8 @@ static int __init example_init(void) for (i = 0; i < nents; i++) { printk(KERN_INFO "sg[%d] -> " - "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", - i, sg[i].page_link, sg[i].offset, sg[i].length); + "page %p offset 0x%.8x length 0x%.8x\n", + i, sg_page(&sg[i]), sg[i].offset, sg[i].length); if (sg_is_last(&sg[i])) break; @@ -104,8 +104,8 @@ static int __init example_init(void) for (i = 0; i < nents; i++) { printk(KERN_INFO "sg[%d] -> " - "page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n", - i, sg[i].page_link, sg[i].offset, sg[i].length); + "page %p offset 0x%.8x length 0x%.8x\n", + i, sg_page(&sg[i]), sg[i].offset, sg[i].length); if (sg_is_last(&sg[i])) break; -- cgit v1.2.3-59-g8ed1b From 77493f04b74cdff3a61fb3fb14b1f5a71d88fd5f Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 12 Jul 2017 14:34:25 -0700 Subject: procfs: fdinfo: extend information about epoll target files Since it is possible to have the same number in the tfd field (say a file is added, closed, then another file dup'ed to the same number and added back), it is impossible to distinguish such target files solely by their numbers. Strictly speaking, regular applications don't need to recognize these targets at all, but for checkpoint/restore's sake we need to collect targets to be able to push them back in proper order on the restore stage. Thus let's add the file position, inode and device number of where this target lies. These three fields can be used as a primary key for sorting, and together with the kcmp helper CRIU can find out an exact file target (from the whole set of processes being checkpointed). Link: http://lkml.kernel.org/r/20170424154423.436491881@gmail.com Signed-off-by: Cyrill Gorcunov Acked-by: Andrei Vagin Cc: Al Viro Cc: Pavel Emelyanov Cc: Michael Kerrisk Cc: Jason Baron Cc: Andy Lutomirski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 6 +++++- fs/eventpoll.c | 8 ++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 4cddbce85ac9..adba21b5ada7 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -1786,12 +1786,16 @@ pair provide additional information particular to the objects they represent. 
pos: 0 flags: 02 mnt_id: 9 - tfd: 5 events: 1d data: ffffffffffffffff + tfd: 5 events: 1d data: ffffffffffffffff pos:0 ino:61af sdev:7 where 'tfd' is a target file descriptor number in decimal form, 'events' is events mask being watched and the 'data' is data associated with a target [see epoll(7) for more details]. + The 'pos' is current offset of the target file in decimal form + [see lseek(2)], 'ino' and 'sdev' are inode and device numbers + where target file resides, all in hex format. + Fsnotify files ~~~~~~~~~~~~~~ For inotify files the format is the following diff --git a/fs/eventpoll.c b/fs/eventpoll.c index a6d194831ed8..322904c3ebdf 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -960,10 +960,14 @@ static void ep_show_fdinfo(struct seq_file *m, struct file *f) mutex_lock(&ep->mtx); for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { struct epitem *epi = rb_entry(rbp, struct epitem, rbn); + struct inode *inode = file_inode(epi->ffd.file); - seq_printf(m, "tfd: %8d events: %8x data: %16llx\n", + seq_printf(m, "tfd: %8d events: %8x data: %16llx " + " pos:%lli ino:%lx sdev:%x\n", epi->ffd.fd, epi->event.events, - (long long)epi->event.data); + (long long)epi->event.data, + (long long)epi->ffd.file->f_pos, + inode->i_ino, inode->i_sb->s_dev); if (seq_has_overflowed(m)) break; } -- cgit v1.2.3-59-g8ed1b From 0791e3644e5ef21646fe565b9061788d05ec71d4 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 12 Jul 2017 14:34:28 -0700 Subject: kcmp: add KCMP_EPOLL_TFD mode to compare epoll target files With the current epoll architecture target files are addressed with a file_struct and a file descriptor number, where the latter is not unique. Moreover, files can be transferred from another process via a unix socket, added into the queue and then closed, so we won't find this descriptor in the task's fdinfo list. Thus to checkpoint and restore such processes CRIU needs to find out where exactly the target file is present, to add it into the epoll queue. For this sake one can use the kcmp call, where some particular target file from the queue is compared with an arbitrary file passed as an argument. Because epoll target files can have the same file descriptor number but a different file_struct, a caller should explicitly specify the offset within. 
To test if some particular file is a matching entry inside epoll, one has to - fill a kcmp_epoll_slot structure with the epoll file descriptor, target file number and target file offset (if only one target is present it should be 0) - call kcmp as kcmp(pid1, pid2, KCMP_EPOLL_TFD, fd, &kcmp_epoll_slot) - the kernel fetches the file pointer matching file descriptor @fd of pid1 - looks up the file struct in the epoll queue of pid2 and returns the traditional 0,1,2 result for sorting purposes Link: http://lkml.kernel.org/r/20170424154423.511592110@gmail.com Signed-off-by: Cyrill Gorcunov Acked-by: Andrey Vagin Cc: Al Viro Cc: Pavel Emelyanov Cc: Michael Kerrisk Cc: Jason Baron Cc: Andy Lutomirski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 42 ++++++++++++++++++++++++++++++++++ include/linux/eventpoll.h | 3 +++ include/uapi/linux/kcmp.h | 10 +++++++++ kernel/kcmp.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 112 insertions(+) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 322904c3ebdf..e7e9901c3790 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1077,6 +1077,48 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) return epir; } +static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff) +{ + struct rb_node *rbp; + struct epitem *epi; + + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (epi->ffd.fd == tfd) { + if (toff == 0) + return epi; + else + toff--; + } + cond_resched(); + } + + return NULL; +} + +struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, + unsigned long toff) +{ + struct file *file_raw; + struct eventpoll *ep; + struct epitem *epi; + + if (!is_file_epoll(file)) + return ERR_PTR(-EINVAL); + + ep = file->private_data; + + mutex_lock(&ep->mtx); + epi = ep_find_tfd(ep, tfd, toff); + if (epi) + file_raw = epi->ffd.file; + else + file_raw = ERR_PTR(-ENOENT); + mutex_unlock(&ep->mtx); + + return file_raw; +} + /* * This is the callback that is passed to the wait queue wakeup * mechanism. 
It is called by the stored file descriptors when they diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 6daf6d4971f6..d8625d214ea7 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -14,6 +14,7 @@ #define _LINUX_EVENTPOLL_H #include +#include /* Forward declarations to avoid compiler errors */ @@ -22,6 +23,8 @@ struct file; #ifdef CONFIG_EPOLL +struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); + /* Used to initialize the epoll bits inside the "struct file" */ static inline void eventpoll_init_file(struct file *file) { diff --git a/include/uapi/linux/kcmp.h b/include/uapi/linux/kcmp.h index 84df14b37360..481e103da78e 100644 --- a/include/uapi/linux/kcmp.h +++ b/include/uapi/linux/kcmp.h @@ -1,6 +1,8 @@ #ifndef _UAPI_LINUX_KCMP_H #define _UAPI_LINUX_KCMP_H +#include + /* Comparison type */ enum kcmp_type { KCMP_FILE, @@ -10,8 +12,16 @@ enum kcmp_type { KCMP_SIGHAND, KCMP_IO, KCMP_SYSVSEM, + KCMP_EPOLL_TFD, KCMP_TYPES, }; +/* Slot for KCMP_EPOLL_TFD */ +struct kcmp_epoll_slot { + __u32 efd; /* epoll file descriptor */ + __u32 tfd; /* target file number */ + __u32 toff; /* target offset within same numbered sequence */ +}; + #endif /* _UAPI_LINUX_KCMP_H */ diff --git a/kernel/kcmp.c b/kernel/kcmp.c index 3a47fa998fe0..ea34ed8bb952 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c @@ -11,6 +11,10 @@ #include #include #include +#include +#include +#include +#include #include @@ -94,6 +98,56 @@ static int kcmp_lock(struct mutex *m1, struct mutex *m2) return err; } +#ifdef CONFIG_EPOLL +static int kcmp_epoll_target(struct task_struct *task1, + struct task_struct *task2, + unsigned long idx1, + struct kcmp_epoll_slot __user *uslot) +{ + struct file *filp, *filp_epoll, *filp_tgt; + struct kcmp_epoll_slot slot; + struct files_struct *files; + + if (copy_from_user(&slot, uslot, sizeof(slot))) + return -EFAULT; + + filp = get_file_raw_ptr(task1, idx1); + if (!filp) + return -EBADF; + + files = get_files_struct(task2); + if (!files) + return -EBADF; + + spin_lock(&files->file_lock); + filp_epoll = fcheck_files(files, slot.efd); + if (filp_epoll) + get_file(filp_epoll); + else + filp_tgt = ERR_PTR(-EBADF); + spin_unlock(&files->file_lock); + put_files_struct(files); + + if (filp_epoll) { + filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff); + fput(filp_epoll); + } else + + if (IS_ERR(filp_tgt)) + return PTR_ERR(filp_tgt); + + return kcmp_ptr(filp, filp_tgt, KCMP_FILE); +} +#else +static int kcmp_epoll_target(struct task_struct *task1, + struct task_struct *task2, + unsigned long idx1, + struct kcmp_epoll_slot __user *uslot) +{ + return -EOPNOTSUPP; +} +#endif + SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2) { @@ -165,6 +219,9 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type, ret = -EOPNOTSUPP; #endif break; + case KCMP_EPOLL_TFD: + ret = kcmp_epoll_target(task1, task2, idx1, (void *)idx2); + break; default: ret = -EINVAL; break; -- cgit v1.2.3-59-g8ed1b From 92ef6da3d06ff551a86de41ae37df9cc4b58d7a0 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Wed, 12 Jul 2017 14:34:31 -0700 Subject: kcmp: fs/epoll: wrap kcmp code with CONFIG_CHECKPOINT_RESTORE The kcmp syscall is built only if CONFIG_CHECKPOINT_RESTORE is selected, so wrap the appropriate helpers in the epoll code with the config to build them conditionally. 
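(To illustrate the new KCMP_EPOLL_TFD mode from userspace, a hedged sketch — the fds and pids below are placeholders, and glibc provides no kcmp wrapper, so the raw syscall is used:)

-------------------------------------------------------
/* Hypothetical: does fd 4 in pid1 back epoll target tfd 5 in pid2? */
#include <stdio.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kcmp.h>

int main(void)
{
	struct kcmp_epoll_slot slot = {
		.efd	= 3,	/* epoll fd in pid2 (placeholder) */
		.tfd	= 5,	/* target file number inside that epoll */
		.toff	= 0,	/* first target with that number */
	};
	pid_t pid1 = 1234, pid2 = 5678;	/* placeholder pids */
	long ret;

	ret = syscall(SYS_kcmp, pid1, pid2, KCMP_EPOLL_TFD,
		      4UL /* fd in pid1 */, (unsigned long)&slot);
	printf("kcmp() = %ld (0 means same underlying file)\n", ret);
	return 0;
}
-------------------------------------------------------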
Link: http://lkml.kernel.org/r/20170513083456.GG1881@uranus.lan Signed-off-by: Cyrill Gorcunov Reported-by: Andrew Morton Cc: Andrey Vagin Cc: Al Viro Cc: Pavel Emelyanov Cc: Michael Kerrisk Cc: Jason Baron Cc: Andy Lutomirski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 2 ++ include/linux/eventpoll.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index e7e9901c3790..e767e4389cb1 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1077,6 +1077,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd) return epir; } +#ifdef CONFIG_CHECKPOINT_RESTORE static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff) { struct rb_node *rbp; @@ -1118,6 +1119,7 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, return file_raw; } +#endif /* CONFIG_CHECKPOINT_RESTORE */ /* * This is the callback that is passed to the wait queue wakeup diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index d8625d214ea7..2f14ac73d01d 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -23,7 +23,9 @@ struct file; #ifdef CONFIG_EPOLL +#ifdef CONFIG_CHECKPOINT_RESTORE struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); +#endif /* Used to initialize the epoll bits inside the "struct file" */ static inline void eventpoll_init_file(struct file *file) -- cgit v1.2.3-59-g8ed1b From e41d58185f1444368873d4d7422f7664a68be61d Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Wed, 12 Jul 2017 14:34:35 -0700 Subject: fault-inject: support systematic fault injection Add a /proc/self/task/<tid>/fail-nth file that allows failing the 0-th, 1-st, 2-nd and so on call systematically. Excerpt from the added documentation: "Write to this file of integer N makes N-th call in the current task fail (N is 0-based). Read from this file returns a single char 'Y' or 'N' that says if the fault setup with a previous write to this file was injected or not, and disables the fault if it wasn't yet injected. Note that this file enables all types of faults (slab, futex, etc). This setting takes precedence over all other generic settings like probability, interval, times, etc. But per-capability settings (e.g. fail_futex/ignore-private) take precedence over it. This feature is intended for systematic testing of faults in a single system call. See an example below" Why add a new setting: 1. Existing settings are global rather than per-task. So parallel testing is not possible. 2. attr->interval is close but it depends on attr->count, which is not reset to 0, so interval does not work as expected. 3. Trying to model this with existing settings requires manipulations of all of probability, interval, times, space, task-filter and the unexposed count and per-task make-it-fail files. 4. Existing settings are per-failure-type, and the set of failure types is potentially expanding. 5. make-it-fail can't be changed by an unprivileged user, and aggressive stress testing is better done as an unprivileged user. Similarly, this would require opening the debugfs files to the unprivileged user, as they would need to reopen at least the times file (not possible to pre-open before dropping privs). The proposed interface solves all of the above (see the example). We want to integrate this into the syzkaller fuzzer.
A prototype has found 10 bugs in the kernel in the first day of usage: https://groups.google.com/forum/#!searchin/syzkaller/%22FAULT_INJECTION%22%7Csort:relevance I've made the current interface work with all types of our sandboxes. For setuid the secret sauce was prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) to make /proc entries non-root owned. So I am fine with the current version of the code. [akpm@linux-foundation.org: fix build] Link: http://lkml.kernel.org/r/20170328130128.101773-1-dvyukov@google.com Signed-off-by: Dmitry Vyukov Cc: Akinobu Mita Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/fault-injection/fault-injection.txt | 78 +++++++++++++++++++++++ fs/proc/base.c | 52 +++++++++++++++ include/linux/sched.h | 1 + kernel/fork.c | 4 ++ lib/fault-inject.c | 7 ++ 5 files changed, 142 insertions(+) diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt index 415484f3d59a..192d8cbcc5f9 100644 --- a/Documentation/fault-injection/fault-injection.txt +++ b/Documentation/fault-injection/fault-injection.txt @@ -134,6 +134,22 @@ use the boot option: fail_futex= mmc_core.fail_request=,,, +o proc entries + +- /proc/self/task/<tid>/fail-nth: + + Write to this file of integer N makes N-th call in the current task fail + (N is 0-based). Read from this file returns a single char 'Y' or 'N' + that says if the fault setup with a previous write to this file was + injected or not, and disables the fault if it wasn't yet injected. + Note that this file enables all types of faults (slab, futex, etc). + This setting takes precedence over all other generic debugfs settings + like probability, interval, times, etc. But per-capability settings + (e.g. fail_futex/ignore-private) take precedence over it. + + This feature is intended for systematic testing of faults in a single + system call. See an example below. + How to add new fault injection capability ----------------------------------------- @@ -278,3 +294,65 @@ allocation failure. # env FAILCMD_TYPE=fail_page_alloc \ ./tools/testing/fault-injection/failcmd.sh --times=100 \ -- make -C tools/testing/selftests/ run_tests + +Systematic faults using fail-nth +--------------------------------- + +The following code systematically faults 0-th, 1-st, 2-nd and so on capabilities in the socketpair() system call.
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int main() +{ + int i, err, res, fail_nth, fds[2]; + char buf[128]; + + system("echo N > /sys/kernel/debug/failslab/ignore-gfp-wait"); + sprintf(buf, "/proc/self/task/%ld/fail-nth", syscall(SYS_gettid)); + fail_nth = open(buf, O_RDWR); + for (i = 0;; i++) { + sprintf(buf, "%d", i); + write(fail_nth, buf, strlen(buf)); + res = socketpair(AF_LOCAL, SOCK_STREAM, 0, fds); + err = errno; + read(fail_nth, buf, 1); + if (res == 0) { + close(fds[0]); + close(fds[1]); + } + printf("%d-th fault %c: res=%d/%d\n", i, buf[0], res, err); + if (buf[0] != 'Y') + break; + } + return 0; +} + +An example output: + +0-th fault Y: res=-1/23 +1-th fault Y: res=-1/23 +2-th fault Y: res=-1/23 +3-th fault Y: res=-1/12 +4-th fault Y: res=-1/12 +5-th fault Y: res=-1/23 +6-th fault Y: res=-1/23 +7-th fault Y: res=-1/23 +8-th fault Y: res=-1/12 +9-th fault Y: res=-1/12 +10-th fault Y: res=-1/12 +11-th fault Y: res=-1/12 +12-th fault Y: res=-1/12 +13-th fault Y: res=-1/12 +14-th fault Y: res=-1/12 +15-th fault Y: res=-1/12 +16-th fault N: res=0/12 diff --git a/fs/proc/base.c b/fs/proc/base.c index f1e1927ccd48..88b773f318cd 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1355,6 +1355,53 @@ static const struct file_operations proc_fault_inject_operations = { .write = proc_fault_inject_write, .llseek = generic_file_llseek, }; + +static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task; + int err, n; + + task = get_proc_task(file_inode(file)); + if (!task) + return -ESRCH; + put_task_struct(task); + if (task != current) + return -EPERM; + err = kstrtoint_from_user(buf, count, 10, &n); + if (err) + return err; + if (n < 0 || n == INT_MAX) + return -EINVAL; + current->fail_nth = n + 1; + return count; +} + +static ssize_t proc_fail_nth_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_struct *task; + int err; + + task = get_proc_task(file_inode(file)); + if (!task) + return -ESRCH; + put_task_struct(task); + if (task != current) + return -EPERM; + if (count < 1) + return -EINVAL; + err = put_user((char)(current->fail_nth ? 'N' : 'Y'), buf); + if (err) + return err; + current->fail_nth = 0; + return 1; +} + +static const struct file_operations proc_fail_nth_operations = { + .read = proc_fail_nth_read, + .write = proc_fail_nth_write, +}; #endif @@ -3311,6 +3358,11 @@ static const struct pid_entry tid_base_stuff[] = { #endif #ifdef CONFIG_FAULT_INJECTION REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), + /* + * Operations on the file check that the task is current, + * so we create it with 0666 to support testing under unprivileged user. 
+ */ + REG("fail-nth", 0666, proc_fail_nth_operations), #endif #ifdef CONFIG_TASK_IO_ACCOUNTING ONE("io", S_IRUSR, proc_tid_io_accounting), diff --git a/include/linux/sched.h b/include/linux/sched.h index 20814b7d7d70..3822d749fc9e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -974,6 +974,7 @@ struct task_struct { #ifdef CONFIG_FAULT_INJECTION int make_it_fail; + int fail_nth; #endif /* * When (nr_dirtied >= nr_dirtied_pause), it's time to call diff --git a/kernel/fork.c b/kernel/fork.c index d2b9d7c31eaf..ade237a96308 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -573,6 +573,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) kcov_task_init(tsk); +#ifdef CONFIG_FAULT_INJECTION + tsk->fail_nth = 0; +#endif + return tsk; free_stack: diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 4ff157159a0d..09ac73c177fd 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -107,6 +107,12 @@ static inline bool fail_stacktrace(struct fault_attr *attr) bool should_fail(struct fault_attr *attr, ssize_t size) { + if (in_task() && current->fail_nth) { + if (--current->fail_nth == 0) + goto fail; + return false; + } + /* No need to check any other properties if the probability is 0 */ if (attr->probability == 0) return false; @@ -134,6 +140,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size) if (!fail_stacktrace(attr)) return false; +fail: fail_dump(attr); if (atomic_read(&attr->times) != -1) -- cgit v1.2.3-59-g8ed1b From 1a23395672658969a4035dcc518ea6cab835c579 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Wed, 12 Jul 2017 14:34:38 -0700 Subject: ipc/sem.c: remove sem_base, embed struct sem sma->sem_base is initialized with sma->sem_base = (struct sem *) &sma[1]; The current code has four problems: - There is an unnecessary pointer dereference: sem_base is not needed. - Alignment for struct sem only works by chance. - The current code causes false positives in static code analysis. - This is a cast between different non-void types, which the future randstruct GCC plugin warns on. And, as a bonus, the code size gets smaller: Before: 0 .text 00003770 After: 0 .text 0000374e [manfred@colorfullife.com: s/[0]/[]/, per hch] Link: http://lkml.kernel.org/r/20170525185107.12869-2-manfred@colorfullife.com Link: http://lkml.kernel.org/r/20170515171912.6298-2-manfred@colorfullife.com Signed-off-by: Manfred Spraul Acked-by: Kees Cook Cc: Kees Cook Cc: <1vier1@web.de> Cc: Davidlohr Bueso Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Fabian Frederick Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sem.h | 22 +++++++++++++- ipc/sem.c | 88 +++++++++++++++++++++-------------------------------- 2 files changed, 55 insertions(+), 55 deletions(-) diff --git a/include/linux/sem.h b/include/linux/sem.h index 9edec926e9d9..9db14093b73c 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -8,11 +8,29 @@ struct task_struct; +/* One semaphore structure for each semaphore in the system. */ +struct sem { + int semval; /* current value */ + /* + * PID of the process that last modified the semaphore. For + * Linux, specifically these are: + * - semop + * - semctl, via SETVAL and SETALL. + * - at task exit when performing undo adjustments (see exit_sem).
+ */ + int sempid; + spinlock_t lock; /* spinlock for fine-grained semtimedop */ + struct list_head pending_alter; /* pending single-sop operations */ + /* that alter the semaphore */ + struct list_head pending_const; /* pending single-sop operations */ + /* that do not alter the semaphore*/ + time_t sem_otime; /* candidate for sem_otime */ +} ____cacheline_aligned_in_smp; + /* One sem_array data structure for each set of semaphores in the system. */ struct sem_array { struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ time_t sem_ctime; /* last change time */ - struct sem *sem_base; /* ptr to first semaphore in array */ struct list_head pending_alter; /* pending operations */ /* that alter the array */ struct list_head pending_const; /* pending complex operations */ @@ -21,6 +39,8 @@ struct sem_array { int sem_nsems; /* no. of semaphores in array */ int complex_count; /* pending complex operations */ unsigned int use_global_lock;/* >0: global lock required */ + + struct sem sems[]; }; #ifdef CONFIG_SYSVIPC diff --git a/ipc/sem.c b/ipc/sem.c index 947dc2348271..fff8337ebab3 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -87,24 +87,6 @@ #include #include "util.h" -/* One semaphore structure for each semaphore in the system. */ -struct sem { - int semval; /* current value */ - /* - * PID of the process that last modified the semaphore. For - * Linux, specifically these are: - * - semop - * - semctl, via SETVAL and SETALL. - * - at task exit when performing undo adjustments (see exit_sem). - */ - int sempid; - spinlock_t lock; /* spinlock for fine-grained semtimedop */ - struct list_head pending_alter; /* pending single-sop operations */ - /* that alter the semaphore */ - struct list_head pending_const; /* pending single-sop operations */ - /* that do not alter the semaphore*/ - time_t sem_otime; /* candidate for sem_otime */ -} ____cacheline_aligned_in_smp; /* One queue for each sleeping process in the system. */ struct sem_queue { @@ -175,7 +157,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); * sem_array.sem_undo * * b) global or semaphore sem_lock() for read/write: - * sem_array.sem_base[i].pending_{const,alter}: + * sem_array.sems[i].pending_{const,alter}: * * c) special: * sem_undo_list.list_proc: @@ -250,7 +232,7 @@ static void unmerge_queues(struct sem_array *sma) */ list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { struct sem *curr; - curr = &sma->sem_base[q->sops[0].sem_num]; + curr = &sma->sems[q->sops[0].sem_num]; list_add_tail(&q->list, &curr->pending_alter); } @@ -270,7 +252,7 @@ static void merge_queues(struct sem_array *sma) { int i; for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; + struct sem *sem = &sma->sems[i]; list_splice_init(&sem->pending_alter, &sma->pending_alter); } @@ -306,7 +288,7 @@ static void complexmode_enter(struct sem_array *sma) sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS; for (i = 0; i < sma->sem_nsems; i++) { - sem = sma->sem_base + i; + sem = &sma->sems[i]; spin_lock(&sem->lock); spin_unlock(&sem->lock); } @@ -366,7 +348,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, * * Both facts are tracked by use_global_mode. */ - sem = sma->sem_base + sops->sem_num; + sem = &sma->sems[sops->sem_num]; /* * Initial check for use_global_lock. 
Just an optimization, @@ -421,7 +403,7 @@ static inline void sem_unlock(struct sem_array *sma, int locknum) complexmode_tryleave(sma); ipc_unlock_object(&sma->sem_perm); } else { - struct sem *sem = sma->sem_base + locknum; + struct sem *sem = &sma->sems[locknum]; spin_unlock(&sem->lock); } } @@ -487,7 +469,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; - size = sizeof(*sma) + nsems * sizeof(struct sem); + size = sizeof(*sma) + nsems * sizeof(sma->sems[0]); sma = ipc_rcu_alloc(size); if (!sma) return -ENOMEM; @@ -504,12 +486,10 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) return retval; } - sma->sem_base = (struct sem *) &sma[1]; - for (i = 0; i < nsems; i++) { - INIT_LIST_HEAD(&sma->sem_base[i].pending_alter); - INIT_LIST_HEAD(&sma->sem_base[i].pending_const); - spin_lock_init(&sma->sem_base[i].lock); + INIT_LIST_HEAD(&sma->sems[i].pending_alter); + INIT_LIST_HEAD(&sma->sems[i].pending_const); + spin_lock_init(&sma->sems[i].lock); } sma->complex_count = 0; @@ -612,7 +592,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q) un = q->undo; for (sop = sops; sop < sops + nsops; sop++) { - curr = sma->sem_base + sop->sem_num; + curr = &sma->sems[sop->sem_num]; sem_op = sop->sem_op; result = curr->semval; @@ -639,7 +619,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q) sop--; pid = q->pid; while (sop >= sops) { - sma->sem_base[sop->sem_num].sempid = pid; + sma->sems[sop->sem_num].sempid = pid; sop--; } @@ -661,7 +641,7 @@ undo: sop--; while (sop >= sops) { sem_op = sop->sem_op; - sma->sem_base[sop->sem_num].semval -= sem_op; + sma->sems[sop->sem_num].semval -= sem_op; if (sop->sem_flg & SEM_UNDO) un->semadj[sop->sem_num] += sem_op; sop--; @@ -692,7 +672,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) * until the operations can go through. */ for (sop = sops; sop < sops + nsops; sop++) { - curr = sma->sem_base + sop->sem_num; + curr = &sma->sems[sop->sem_num]; sem_op = sop->sem_op; result = curr->semval; @@ -716,7 +696,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) } for (sop = sops; sop < sops + nsops; sop++) { - curr = sma->sem_base + sop->sem_num; + curr = &sma->sems[sop->sem_num]; sem_op = sop->sem_op; result = curr->semval; @@ -815,7 +795,7 @@ static int wake_const_ops(struct sem_array *sma, int semnum, if (semnum == -1) pending_list = &sma->pending_const; else - pending_list = &sma->sem_base[semnum].pending_const; + pending_list = &sma->sems[semnum].pending_const; list_for_each_entry_safe(q, tmp, pending_list, list) { int error = perform_atomic_semop(sma, q); @@ -856,7 +836,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, for (i = 0; i < nsops; i++) { int num = sops[i].sem_num; - if (sma->sem_base[num].semval == 0) { + if (sma->sems[num].semval == 0) { got_zero = 1; semop_completed |= wake_const_ops(sma, num, wake_q); } @@ -867,7 +847,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, * Assume all were changed. 
*/ for (i = 0; i < sma->sem_nsems; i++) { - if (sma->sem_base[i].semval == 0) { + if (sma->sems[i].semval == 0) { got_zero = 1; semop_completed |= wake_const_ops(sma, i, wake_q); } @@ -909,7 +889,7 @@ static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *w if (semnum == -1) pending_list = &sma->pending_alter; else - pending_list = &sma->sem_base[semnum].pending_alter; + pending_list = &sma->sems[semnum].pending_alter; again: list_for_each_entry_safe(q, tmp, pending_list, list) { @@ -922,7 +902,7 @@ again: * be in the per semaphore pending queue, and decrements * cannot be successful if the value is already 0. */ - if (semnum != -1 && sma->sem_base[semnum].semval == 0) + if (semnum != -1 && sma->sems[semnum].semval == 0) break; error = perform_atomic_semop(sma, q); @@ -959,9 +939,9 @@ again: static void set_semotime(struct sem_array *sma, struct sembuf *sops) { if (sops == NULL) { - sma->sem_base[0].sem_otime = get_seconds(); + sma->sems[0].sem_otime = get_seconds(); } else { - sma->sem_base[sops[0].sem_num].sem_otime = + sma->sems[sops[0].sem_num].sem_otime = get_seconds(); } } @@ -1067,9 +1047,9 @@ static int count_semcnt(struct sem_array *sma, ushort semnum, semcnt = 0; /* First: check the simple operations. They are easy to evaluate */ if (count_zero) - l = &sma->sem_base[semnum].pending_const; + l = &sma->sems[semnum].pending_const; else - l = &sma->sem_base[semnum].pending_alter; + l = &sma->sems[semnum].pending_alter; list_for_each_entry(q, l, list) { /* all task on a per-semaphore list sleep on exactly @@ -1124,7 +1104,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); } for (i = 0; i < sma->sem_nsems; i++) { - struct sem *sem = sma->sem_base + i; + struct sem *sem = &sma->sems[i]; list_for_each_entry_safe(q, tq, &sem->pending_const, list) { unlink_queue(sma, q); wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); @@ -1174,9 +1154,9 @@ static time_t get_semotime(struct sem_array *sma) int i; time_t res; - res = sma->sem_base[0].sem_otime; + res = sma->sems[0].sem_otime; for (i = 1; i < sma->sem_nsems; i++) { - time_t to = sma->sem_base[i].sem_otime; + time_t to = sma->sems[i].sem_otime; if (to > res) res = to; @@ -1325,7 +1305,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, return -EIDRM; } - curr = &sma->sem_base[semnum]; + curr = &sma->sems[semnum]; ipc_assert_locked_object(&sma->sem_perm); list_for_each_entry(un, &sma->list_id, list_id) @@ -1402,7 +1382,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, } } for (i = 0; i < sma->sem_nsems; i++) - sem_io[i] = sma->sem_base[i].semval; + sem_io[i] = sma->sems[i].semval; sem_unlock(sma, -1); rcu_read_unlock(); err = 0; @@ -1450,8 +1430,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, } for (i = 0; i < nsems; i++) { - sma->sem_base[i].semval = sem_io[i]; - sma->sem_base[i].sempid = task_tgid_vnr(current); + sma->sems[i].semval = sem_io[i]; + sma->sems[i].sempid = task_tgid_vnr(current); } ipc_assert_locked_object(&sma->sem_perm); @@ -1476,7 +1456,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, err = -EIDRM; goto out_unlock; } - curr = &sma->sem_base[semnum]; + curr = &sma->sems[semnum]; switch (cmd) { case GETVAL: @@ -1932,7 +1912,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, */ if (nsops == 1) { struct sem *curr; - curr = &sma->sem_base[sops->sem_num]; + curr = &sma->sems[sops->sem_num]; if (alter) { 
if (sma->complex_count) { @@ -2146,7 +2126,7 @@ void exit_sem(struct task_struct *tsk) /* perform adjustments registered in un */ for (i = 0; i < sma->sem_nsems; i++) { - struct sem *semaphore = &sma->sem_base[i]; + struct sem *semaphore = &sma->sems[i]; if (un->semadj[i]) { semaphore->semval += un->semadj[i]; /* -- cgit v1.2.3-59-g8ed1b From dba4cdd39e698d8dcdad0656825423052ac90ccd Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Wed, 12 Jul 2017 14:34:41 -0700 Subject: ipc: merge ipc_rcu and kern_ipc_perm ipc has two management structures that exist for every id: - struct kern_ipc_perm, which contains e.g. the permissions. - struct ipc_rcu, which contains the rcu head for rcu handling and the refcount. The patch merges both structures. As a bonus, we may save one cacheline, because both structures are cacheline aligned. In addition, it reduces the number of casts; instead, most codepaths can use container_of(). To simplify the code, ipc_rcu_alloc() initializes the allocation to 0. [manfred@colorfullife.com: really include the memset() into ipc_alloc_rcu()] Link: http://lkml.kernel.org/r/564f8612-0601-b267-514f-a9f650ec9b32@colorfullife.com Link: http://lkml.kernel.org/r/20170525185107.12869-3-manfred@colorfullife.com Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ipc.h | 3 +++ ipc/msg.c | 19 +++++++++++-------- ipc/sem.c | 34 +++++++++++++++++----------------- ipc/shm.c | 18 +++++++++++------- ipc/util.c | 35 +++++++++++++++++------------------ ipc/util.h | 18 +++++++----------- 6 files changed, 66 insertions(+), 61 deletions(-) diff --git a/include/linux/ipc.h b/include/linux/ipc.h index 71fd92d81b26..5591f055e13f 100644 --- a/include/linux/ipc.h +++ b/include/linux/ipc.h @@ -20,6 +20,9 @@ struct kern_ipc_perm { umode_t mode; unsigned long seq; void *security; + + struct rcu_head rcu; + atomic_t refcount; } ____cacheline_aligned_in_smp; #endif /* _LINUX_IPC_H */ diff --git a/ipc/msg.c b/ipc/msg.c index 104926dc72be..0ed7dae7d4e8 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -97,8 +97,8 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) static void msg_rcu_free(struct rcu_head *head) { - struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); - struct msg_queue *msq = ipc_rcu_to_struct(p); + struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); + struct msg_queue *msq = container_of(p, struct msg_queue, q_perm); security_msg_queue_free(msq); ipc_rcu_free(head); @@ -118,7 +118,10 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) key_t key = params->key; int msgflg = params->flg; - msq = ipc_rcu_alloc(sizeof(*msq)); + BUILD_BUG_ON(offsetof(struct msg_queue, q_perm) != 0); + + msq = container_of(ipc_rcu_alloc(sizeof(*msq)), struct msg_queue, + q_perm); if (!msq) return -ENOMEM; @@ -128,7 +131,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { - ipc_rcu_putref(msq, ipc_rcu_free); + ipc_rcu_putref(&msq->q_perm, ipc_rcu_free); return retval; } @@ -144,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) /* ipc_addid() locks msq upon success.
*/ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (id < 0) { - ipc_rcu_putref(msq, msg_rcu_free); + ipc_rcu_putref(&msq->q_perm, msg_rcu_free); return id; } @@ -249,7 +252,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) free_msg(msg); } atomic_sub(msq->q_cbytes, &ns->msg_bytes); - ipc_rcu_putref(msq, msg_rcu_free); + ipc_rcu_putref(&msq->q_perm, msg_rcu_free); } /* @@ -688,7 +691,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, /* enqueue the sender and prepare to block */ ss_add(msq, &s, msgsz); - if (!ipc_rcu_getref(msq)) { + if (!ipc_rcu_getref(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } @@ -700,7 +703,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, rcu_read_lock(); ipc_lock_object(&msq->q_perm); - ipc_rcu_putref(msq, msg_rcu_free); + ipc_rcu_putref(&msq->q_perm, msg_rcu_free); /* raced with RMID? */ if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; diff --git a/ipc/sem.c b/ipc/sem.c index fff8337ebab3..bdff6d93d2c7 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -260,8 +260,8 @@ static void merge_queues(struct sem_array *sma) static void sem_rcu_free(struct rcu_head *head) { - struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); - struct sem_array *sma = ipc_rcu_to_struct(p); + struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); + struct sem_array *sma = container_of(p, struct sem_array, sem_perm); security_sem_free(sma); ipc_rcu_free(head); @@ -438,7 +438,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns static inline void sem_lock_and_putref(struct sem_array *sma) { sem_lock(sma, NULL, -1); - ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); } static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) @@ -469,20 +469,20 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; + BUILD_BUG_ON(offsetof(struct sem_array, sem_perm) != 0); + size = sizeof(*sma) + nsems * sizeof(sma->sems[0]); - sma = ipc_rcu_alloc(size); + sma = container_of(ipc_rcu_alloc(size), struct sem_array, sem_perm); if (!sma) return -ENOMEM; - memset(sma, 0, size); - sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); if (retval) { - ipc_rcu_putref(sma, ipc_rcu_free); + ipc_rcu_putref(&sma->sem_perm, ipc_rcu_free); return retval; } @@ -502,7 +502,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); if (id < 0) { - ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return id; } ns->used_sems += nsems; @@ -1122,7 +1122,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) wake_up_q(&wake_q); ns->used_sems -= sma->sem_nsems; - ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); } static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) @@ -1362,7 +1362,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, goto out_unlock; } if (nsems > SEMMSL_FAST) { - if (!ipc_rcu_getref(sma)) { + if (!ipc_rcu_getref(&sma->sem_perm)) { err = -EIDRM; goto out_unlock; } @@ -1370,7 +1370,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, rcu_read_unlock(); sem_io = ipc_alloc(sizeof(ushort)*nsems); if (sem_io == NULL) { - 
ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return -ENOMEM; } @@ -1395,7 +1395,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int i; struct sem_undo *un; - if (!ipc_rcu_getref(sma)) { + if (!ipc_rcu_getref(&sma->sem_perm)) { err = -EIDRM; goto out_rcu_wakeup; } @@ -1404,20 +1404,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, if (nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); if (sem_io == NULL) { - ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return -ENOMEM; } } if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { - ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { - ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); err = -ERANGE; goto out_free; } @@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) } nsems = sma->sem_nsems; - if (!ipc_rcu_getref(sma)) { + if (!ipc_rcu_getref(&sma->sem_perm)) { rcu_read_unlock(); un = ERR_PTR(-EIDRM); goto out; @@ -1709,7 +1709,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) /* step 2: allocate new undo structure */ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { - ipc_rcu_putref(sma, sem_rcu_free); + ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return ERR_PTR(-ENOMEM); } diff --git a/ipc/shm.c b/ipc/shm.c index f45c7959b264..5ef6d31a52c5 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -174,9 +174,10 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) static void shm_rcu_free(struct rcu_head *head) { - struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); - struct shmid_kernel *shp = ipc_rcu_to_struct(p); - + struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm, + rcu); + struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel, + shm_perm); security_shm_free(shp); ipc_rcu_free(head); } @@ -241,7 +242,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) user_shm_unlock(i_size_read(file_inode(shm_file)), shp->mlock_user); fput(shm_file); - ipc_rcu_putref(shp, shm_rcu_free); + ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); } /* @@ -542,7 +543,10 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; - shp = ipc_rcu_alloc(sizeof(*shp)); + BUILD_BUG_ON(offsetof(struct shmid_kernel, shm_perm) != 0); + + shp = container_of(ipc_rcu_alloc(sizeof(*shp)), struct shmid_kernel, + shm_perm); if (!shp) return -ENOMEM; @@ -553,7 +557,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { - ipc_rcu_putref(shp, ipc_rcu_free); + ipc_rcu_putref(&shp->shm_perm, ipc_rcu_free); return error; } @@ -624,7 +628,7 @@ no_id: user_shm_unlock(size, shp->mlock_user); fput(file); no_file: - ipc_rcu_putref(shp, shm_rcu_free); + ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); return error; } diff --git a/ipc/util.c b/ipc/util.c index caec7b1bfaa3..5d1ff1035efe 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -418,46 +418,45 @@ void ipc_free(void *ptr) } /** - * ipc_rcu_alloc - allocate ipc and rcu space + * ipc_rcu_alloc - allocate ipc space * @size: size desired * - * Allocate memory for the rcu header structure + the object. 
- * Returns the pointer to the object or NULL upon failure. + * Allocate memory for an ipc object. + * The first member must be struct kern_ipc_perm. */ -void *ipc_rcu_alloc(int size) +struct kern_ipc_perm *ipc_rcu_alloc(int size) { /* * We prepend the allocation with the rcu struct */ - struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size); + struct kern_ipc_perm *out = ipc_alloc(size); if (unlikely(!out)) return NULL; + + memset(out, 0, size); atomic_set(&out->refcount, 1); - return out + 1; + return out; } -int ipc_rcu_getref(void *ptr) +int ipc_rcu_getref(struct kern_ipc_perm *ptr) { - struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; - - return atomic_inc_not_zero(&p->refcount); + return atomic_inc_not_zero(&ptr->refcount); } -void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)) +void ipc_rcu_putref(struct kern_ipc_perm *ptr, + void (*func)(struct rcu_head *head)) { - struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; - - if (!atomic_dec_and_test(&p->refcount)) + if (!atomic_dec_and_test(&ptr->refcount)) return; - call_rcu(&p->rcu, func); + call_rcu(&ptr->rcu, func); } -void ipc_rcu_free(struct rcu_head *head) +void ipc_rcu_free(struct rcu_head *h) { - struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct kern_ipc_perm *ptr = container_of(h, struct kern_ipc_perm, rcu); - kvfree(p); + kvfree(ptr); } /** diff --git a/ipc/util.h b/ipc/util.h index 60ddccca464d..09d0f918c3e2 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -47,13 +47,6 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { } static inline void shm_exit_ns(struct ipc_namespace *ns) { } #endif -struct ipc_rcu { - struct rcu_head rcu; - atomic_t refcount; -} ____cacheline_aligned_in_smp; - -#define ipc_rcu_to_struct(p) ((void *)(p+1)) - /* * Structure that holds the parameters needed by the ipc operations * (see after) @@ -125,11 +118,14 @@ void ipc_free(void *ptr); * Objects are reference counted, they start with reference count 1. * getref increases the refcount, the putref call that reduces the recount * to 0 schedules the rcu destruction. Caller must guarantee locking. + * + * struct kern_ipc_perm must be the first member in the allocated structure. */ -void *ipc_rcu_alloc(int size); -int ipc_rcu_getref(void *ptr); -void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); -void ipc_rcu_free(struct rcu_head *head); +struct kern_ipc_perm *ipc_rcu_alloc(int size); +int ipc_rcu_getref(struct kern_ipc_perm *ptr); +void ipc_rcu_putref(struct kern_ipc_perm *ptr, + void (*func)(struct rcu_head *head)); +void ipc_rcu_free(struct rcu_head *h); struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id); -- cgit v1.2.3-59-g8ed1b From 2cd648c110b5570c3280bd645797658cabbe5f5c Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Wed, 12 Jul 2017 14:34:44 -0700 Subject: include/linux/sem.h: correctly document sem_ctime sem_ctime is initialized to the semget() time and then updated at every semctl() that changes the array. Thus it does not represent the time of the last change. In particular, semop() calls are recorded only in sem_otime, not in sem_ctime. This is already described in ipc/sem.c; I had just overlooked that there are comments in include/linux/sem.h and man semctl(2) as well. So: correct the wrong comments.
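To make the distinction concrete, here is a small userspace sketch (error handling omitted; on Linux the caller must define union semun) showing that semop() advances sem_otime while sem_ctime keeps the create/last-semctl() time:

-------------------------------------------------------
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {			/* must be defined by the caller */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, 0600);	/* sets sem_ctime */
	struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
	struct semid_ds ds;
	union semun arg = { .buf = &ds };

	sleep(1);
	semop(id, &op, 1);		/* recorded in sem_otime only */

	semctl(id, 0, IPC_STAT, arg);
	printf("otime=%ld ctime=%ld\n",
	       (long)ds.sem_otime, (long)ds.sem_ctime);
	/* otime is now later than ctime: semop() left sem_ctime alone */

	semctl(id, 0, IPC_RMID, arg);
	return 0;
}
-------------------------------------------------------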
Link: http://lkml.kernel.org/r/20170515171912.6298-4-manfred@colorfullife.com Signed-off-by: Manfred Spraul Cc: Kees Cook Cc: <1vier1@web.de> Cc: Davidlohr Bueso Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sem.h | 2 +- include/uapi/linux/sem.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/sem.h b/include/linux/sem.h index 9db14093b73c..be5cf2ea14ad 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -30,7 +30,7 @@ struct sem { /* One sem_array data structure for each set of semaphores in the system. */ struct sem_array { struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ - time_t sem_ctime; /* last change time */ + time_t sem_ctime; /* create/last semctl() time */ struct list_head pending_alter; /* pending operations */ /* that alter the array */ struct list_head pending_const; /* pending complex operations */ diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h index dd73b908b2f3..67eb90361692 100644 --- a/include/uapi/linux/sem.h +++ b/include/uapi/linux/sem.h @@ -23,7 +23,7 @@ struct semid_ds { struct ipc_perm sem_perm; /* permissions .. see ipc.h */ __kernel_time_t sem_otime; /* last semop time */ - __kernel_time_t sem_ctime; /* last change time */ + __kernel_time_t sem_ctime; /* create/last semctl() time */ struct sem *sem_base; /* ptr to first semaphore in array */ struct sem_queue *sem_pending; /* pending operations to be processed */ struct sem_queue **sem_pending_last; /* last pending operation */ -- cgit v1.2.3-59-g8ed1b From f8dbe8d290637ac3f68600e30d092393fe9b40a5 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:34:47 -0700 Subject: ipc: drop non-RCU allocation The only users of ipc_alloc() were ipc_rcu_alloc() and the on-heap sem_io fall-back memory. Better to just open-code these to make things easier to read. 
[manfred@colorfullife.com: Rediff due to inclusion of memset() into ipc_rcu_alloc()] Link: http://lkml.kernel.org/r/20170525185107.12869-5-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 8 +++++--- ipc/util.c | 25 +------------------------ ipc/util.h | 6 ------ 3 files changed, 6 insertions(+), 33 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index bdff6d93d2c7..484ccf83cf85 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1368,7 +1368,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, } sem_unlock(sma, -1); rcu_read_unlock(); - sem_io = ipc_alloc(sizeof(ushort)*nsems); + sem_io = kvmalloc_array(nsems, sizeof(ushort), + GFP_KERNEL); if (sem_io == NULL) { ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return -ENOMEM; @@ -1402,7 +1403,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, rcu_read_unlock(); if (nsems > SEMMSL_FAST) { - sem_io = ipc_alloc(sizeof(ushort)*nsems); + sem_io = kvmalloc_array(nsems, sizeof(ushort), + GFP_KERNEL); if (sem_io == NULL) { ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); return -ENOMEM; @@ -1480,7 +1482,7 @@ out_rcu_wakeup: wake_up_q(&wake_q); out_free: if (sem_io != fast_sem_io) - ipc_free(sem_io); + kvfree(sem_io); return err; } diff --git a/ipc/util.c b/ipc/util.c index 5d1ff1035efe..dd73feb1569a 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -394,29 +394,6 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) ipcp->deleted = true; } -/** - * ipc_alloc - allocate ipc space - * @size: size desired - * - * Allocate memory from the appropriate pools and return a pointer to it. - * NULL is returned if the allocation fails - */ -void *ipc_alloc(int size) -{ - return kvmalloc(size, GFP_KERNEL); -} - -/** - * ipc_free - free ipc space - * @ptr: pointer returned by ipc_alloc - * - * Free a block created with ipc_alloc(). - */ -void ipc_free(void *ptr) -{ - kvfree(ptr); -} - /** * ipc_rcu_alloc - allocate ipc space * @size: size desired @@ -429,7 +406,7 @@ struct kern_ipc_perm *ipc_rcu_alloc(int size) /* * We prepend the allocation with the rcu struct */ - struct kern_ipc_perm *out = ipc_alloc(size); + struct kern_ipc_perm *out = kvmalloc(size, GFP_KERNEL); if (unlikely(!out)) return NULL; diff --git a/ipc/util.h b/ipc/util.h index 09d0f918c3e2..2578fd9be835 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -107,12 +107,6 @@ void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *); /* must be called with ipcp locked */ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); -/* for rare, potentially huge allocations. - * both function can sleep - */ -void *ipc_alloc(int size); -void ipc_free(void *ptr); - /* * For allocation that need to be freed by RCU. * Objects are reference counted, they start with reference count 1. -- cgit v1.2.3-59-g8ed1b From 1b4654ef72f61c84704b3c79b50fdeed8747fc56 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:34:50 -0700 Subject: ipc/sem: do not use ipc_rcu_free() Avoid using ipc_rcu_free, since it just re-finds the original structure pointer. For the pre-list-init failure path, there is no RCU needed, since it was just allocated. It can be directly freed. 
Link: http://lkml.kernel.org/r/20170525185107.12869-6-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 484ccf83cf85..a04c4d6d120c 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -258,13 +258,18 @@ static void merge_queues(struct sem_array *sma) } } +static void __sem_free(struct sem_array *sma) +{ + kvfree(sma); +} + static void sem_rcu_free(struct rcu_head *head) { struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); struct sem_array *sma = container_of(p, struct sem_array, sem_perm); security_sem_free(sma); - ipc_rcu_free(head); + __sem_free(sma); } /* @@ -482,7 +487,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); if (retval) { - ipc_rcu_putref(&sma->sem_perm, ipc_rcu_free); + __sem_free(sma); return retval; } -- cgit v1.2.3-59-g8ed1b From 66470b18179f8839a62b4c20ffd3903a4025fe90 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:34:53 -0700 Subject: ipc/shm: do not use ipc_rcu_free() Avoid using ipc_rcu_free, since it just re-finds the original structure pointer. For the pre-list-init failure path, there is no RCU needed, since it was just allocated. It can be directly freed. Link: http://lkml.kernel.org/r/20170525185107.12869-7-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ipc/shm.c b/ipc/shm.c index 5ef6d31a52c5..566c1e193ee1 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -172,6 +172,11 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) ipc_lock_object(&ipcp->shm_perm); } +static void __shm_free(struct shmid_kernel *shp) +{ + kvfree(shp); +} + static void shm_rcu_free(struct rcu_head *head) { struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm, @@ -179,7 +184,7 @@ static void shm_rcu_free(struct rcu_head *head) struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel, shm_perm); security_shm_free(shp); - ipc_rcu_free(head); + __shm_free(shp); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) @@ -557,7 +562,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { - ipc_rcu_putref(&shp->shm_perm, ipc_rcu_free); + __shm_free(shp); return error; } -- cgit v1.2.3-59-g8ed1b From 9ef5932f8a1134b9d93676ee26701b2be90c7a95 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:34:56 -0700 Subject: ipc/msg: do not use ipc_rcu_free() Avoid using ipc_rcu_free, since it just re-finds the original structure pointer. For the pre-list-init failure path, there is no RCU needed, since it was just allocated. It can be directly freed. 
Link: http://lkml.kernel.org/r/20170525185107.12869-8-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/msg.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ipc/msg.c b/ipc/msg.c index 0ed7dae7d4e8..25d43e27ef12 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -95,13 +95,18 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) ipc_rmid(&msg_ids(ns), &s->q_perm); } +static void __msg_free(struct msg_queue *msq) +{ + kvfree(msq); +} + static void msg_rcu_free(struct rcu_head *head) { struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); struct msg_queue *msq = container_of(p, struct msg_queue, q_perm); security_msg_queue_free(msq); - ipc_rcu_free(head); + __msg_free(msq); } /** @@ -131,7 +136,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { - ipc_rcu_putref(&msq->q_perm, ipc_rcu_free); + __msg_free(msq); return retval; } -- cgit v1.2.3-59-g8ed1b From 5ccc8fb54f1c083f1dc7e073150ec18d43457872 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:34:59 -0700 Subject: ipc/util: drop ipc_rcu_free() There are no more callers of ipc_rcu_free(), so remove it. Link: http://lkml.kernel.org/r/20170525185107.12869-9-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/util.c | 7 ------- ipc/util.h | 1 - 2 files changed, 8 deletions(-) diff --git a/ipc/util.c b/ipc/util.c index dd73feb1569a..556884bab698 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -429,13 +429,6 @@ void ipc_rcu_putref(struct kern_ipc_perm *ptr, call_rcu(&ptr->rcu, func); } -void ipc_rcu_free(struct rcu_head *h) -{ - struct kern_ipc_perm *ptr = container_of(h, struct kern_ipc_perm, rcu); - - kvfree(ptr); -} - /** * ipcperms - check ipc permissions * @ns: ipc namespace diff --git a/ipc/util.h b/ipc/util.h index 2578fd9be835..44efbc0b635b 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -119,7 +119,6 @@ struct kern_ipc_perm *ipc_rcu_alloc(int size); int ipc_rcu_getref(struct kern_ipc_perm *ptr); void ipc_rcu_putref(struct kern_ipc_perm *ptr, void (*func)(struct rcu_head *head)); -void ipc_rcu_free(struct rcu_head *h); struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id); -- cgit v1.2.3-59-g8ed1b From 101ede01dfd5072651965e974bc6e30c8d0748e2 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:02 -0700 Subject: ipc/sem: avoid ipc_rcu_alloc() Instead of using ipc_rcu_alloc() which only performs the refcount bump, open code it to perform better sem-specific checks. This also allows for sem_array structure layout to be randomized in the future. 
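The open-coded sem_alloc() in the diff below pairs a flexible-array allocation with an overflow check on the size computation. The same idiom in a self-contained sketch (the struct is hypothetical, not from the patch):

-------------------------------------------------------
#include <limits.h>
#include <stdlib.h>
#include <string.h>

struct vec {
	size_t len;
	int items[];		/* flexible array member */
};

static struct vec *vec_alloc(size_t n)
{
	struct vec *v;
	size_t size;

	/* Reject counts that would overflow the size computation,
	 * mirroring the INT_MAX guard in sem_alloc(). */
	if (n > (INT_MAX - sizeof(*v)) / sizeof(v->items[0]))
		return NULL;

	size = sizeof(*v) + n * sizeof(v->items[0]);
	v = malloc(size);
	if (!v)
		return NULL;
	memset(v, 0, size);
	v->len = n;
	return v;
}
-------------------------------------------------------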
[manfred@colorfullife.com: Rediff, because the memset was temporarily inside ipc_rcu_alloc()] Link: http://lkml.kernel.org/r/20170525185107.12869-10-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index a04c4d6d120c..445a5b5eb88f 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -451,6 +451,25 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) ipc_rmid(&sem_ids(ns), &s->sem_perm); } +static struct sem_array *sem_alloc(size_t nsems) +{ + struct sem_array *sma; + size_t size; + + if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) + return NULL; + + size = sizeof(*sma) + nsems * sizeof(sma->sems[0]); + sma = kvmalloc(size, GFP_KERNEL); + if (unlikely(!sma)) + return NULL; + + memset(sma, 0, size); + atomic_set(&sma->sem_perm.refcount, 1); + + return sma; +} + /** * newary - Create a new semaphore set * @ns: namespace @@ -463,7 +482,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) int id; int retval; struct sem_array *sma; - int size; key_t key = params->key; int nsems = params->u.nsems; int semflg = params->flg; @@ -474,10 +492,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; - BUILD_BUG_ON(offsetof(struct sem_array, sem_perm) != 0); - - size = sizeof(*sma) + nsems * sizeof(sma->sems[0]); - sma = container_of(ipc_rcu_alloc(size), struct sem_array, sem_perm); + sma = sem_alloc(nsems); if (!sma) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 3e0c24042e5aa55eee817caeca67246df69931e1 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:05 -0700 Subject: ipc/shm: avoid ipc_rcu_alloc() Instead of using ipc_rcu_alloc() which only performs the refcount bump, open code it. This also allows for shmid_kernel structure layout to be randomized in the future. Link: http://lkml.kernel.org/r/20170525185107.12869-11-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/ipc/shm.c b/ipc/shm.c index 566c1e193ee1..2ac489ef56e4 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -518,6 +518,19 @@ static const struct vm_operations_struct shm_vm_ops = { #endif }; +static struct shmid_kernel *shm_alloc(void) +{ + struct shmid_kernel *shp; + + shp = kvmalloc(sizeof(*shp), GFP_KERNEL); + if (unlikely(!shp)) + return NULL; + + atomic_set(&shp->shm_perm.refcount, 1); + + return shp; +} + /** * newseg - Create a new shared memory segment * @ns: namespace @@ -548,10 +561,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; - BUILD_BUG_ON(offsetof(struct shmid_kernel, shm_perm) != 0); - - shp = container_of(ipc_rcu_alloc(sizeof(*shp)), struct shmid_kernel, - shm_perm); + shp = shm_alloc(); if (!shp) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From 52f908904e7e05b6300162faa48152df073be645 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:07 -0700 Subject: ipc/msg: avoid ipc_rcu_alloc() Instead of using ipc_rcu_alloc() which only performs the refcount bump, open code it. This also allows for msg_queue structure layout to be randomized in the future. 
Link: http://lkml.kernel.org/r/20170525185107.12869-12-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/msg.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/ipc/msg.c b/ipc/msg.c index 25d43e27ef12..10094a731b8e 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -109,6 +109,19 @@ static void msg_rcu_free(struct rcu_head *head) __msg_free(msq); } +static struct msg_queue *msg_alloc(void) +{ + struct msg_queue *msq; + + msq = kvmalloc(sizeof(*msq), GFP_KERNEL); + if (unlikely(!msq)) + return NULL; + + atomic_set(&msq->q_perm.refcount, 1); + + return msq; +} + /** * newque - Create a new msg queue * @ns: namespace @@ -123,10 +136,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) key_t key = params->key; int msgflg = params->flg; - BUILD_BUG_ON(offsetof(struct msg_queue, q_perm) != 0); - - msq = container_of(ipc_rcu_alloc(sizeof(*msq)), struct msg_queue, - q_perm); + msq = msg_alloc(); if (!msq) return -ENOMEM; -- cgit v1.2.3-59-g8ed1b From c3f6fb6fe4e638a58eae558cb209c463cd46c2fc Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:10 -0700 Subject: ipc/util: drop ipc_rcu_alloc() No callers remain for ipc_rcu_alloc(). Drop the function. [manfred@colorfullife.com: Rediff because the memset was temporarily inside ipc_rcu_free()] Link: http://lkml.kernel.org/r/20170525185107.12869-13-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/util.c | 21 --------------------- ipc/util.h | 3 --- 2 files changed, 24 deletions(-) diff --git a/ipc/util.c b/ipc/util.c index 556884bab698..2428dd44ca97 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -394,27 +394,6 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) ipcp->deleted = true; } -/** - * ipc_rcu_alloc - allocate ipc space - * @size: size desired - * - * Allocate memory for an ipc object. - * The first member must be struct kern_ipc_perm. - */ -struct kern_ipc_perm *ipc_rcu_alloc(int size) -{ - /* - * We prepend the allocation with the rcu struct - */ - struct kern_ipc_perm *out = kvmalloc(size, GFP_KERNEL); - if (unlikely(!out)) - return NULL; - - memset(out, 0, size); - atomic_set(&out->refcount, 1); - return out; -} - int ipc_rcu_getref(struct kern_ipc_perm *ptr) { return atomic_inc_not_zero(&ptr->refcount); diff --git a/ipc/util.h b/ipc/util.h index 44efbc0b635b..77336c2bdb9c 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -112,10 +112,7 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); * Objects are reference counted, they start with reference count 1. * getref increases the refcount, the putref call that reduces the recount * to 0 schedules the rcu destruction. Caller must guarantee locking. - * - * struct kern_ipc_perm must be the first member in the allocated structure. */ -struct kern_ipc_perm *ipc_rcu_alloc(int size); int ipc_rcu_getref(struct kern_ipc_perm *ptr); void ipc_rcu_putref(struct kern_ipc_perm *ptr, void (*func)(struct rcu_head *head)); -- cgit v1.2.3-59-g8ed1b From 2ec55f8024db859d70f14c26e91ca044328dd50d Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Wed, 12 Jul 2017 14:35:13 -0700 Subject: ipc/sem.c: avoid ipc_rcu_putref for failed ipc_addid() Loosely based on a patch from Kees Cook : - id and retval can be merged - if ipc_addid() fails, then use call_rcu() directly. 
The difference is that call_rcu is used for failed ipc_addid() calls, to continue to guarantee an rcu delay for security_sem_free(). Link: http://lkml.kernel.org/r/20170525185107.12869-14-manfred@colorfullife.com Signed-off-by: Manfred Spraul Cc: Kees Cook Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 445a5b5eb88f..2b2ed56e0fde 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -479,7 +479,6 @@ static struct sem_array *sem_alloc(size_t nsems) */ static int newary(struct ipc_namespace *ns, struct ipc_params *params) { - int id; int retval; struct sem_array *sma; key_t key = params->key; @@ -520,10 +519,10 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) sma->sem_nsems = nsems; sma->sem_ctime = get_seconds(); - id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); - if (id < 0) { - ipc_rcu_putref(&sma->sem_perm, sem_rcu_free); - return id; + retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); + if (retval < 0) { + call_rcu(&sma->sem_perm.rcu, sem_rcu_free); + return retval; } ns->used_sems += nsems; -- cgit v1.2.3-59-g8ed1b From a2642f8770993a1598500bb6a8082b7c32faff8b Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Wed, 12 Jul 2017 14:35:16 -0700 Subject: ipc/shm.c: avoid ipc_rcu_putref for failed ipc_addid() Loosely based on a patch from Kees Cook : - id and error can be merged - if operations before ipc_addid() fail, then use call_rcu() directly. The difference is that call_rcu is used for failures after security_shm_alloc(), to continue to guarantee an rcu delay for security_shm_free(). Link: http://lkml.kernel.org/r/20170525185107.12869-15-manfred@colorfullife.com Signed-off-by: Manfred Spraul Cc: Kees Cook Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/ipc/shm.c b/ipc/shm.c index 2ac489ef56e4..c5976d318ed1 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -548,7 +548,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file *file; char name[13]; - int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) @@ -617,11 +616,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_file = file; shp->shm_creator = current; - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); - if (id < 0) { - error = id; + error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); + if (error < 0) goto no_id; - } list_add(&shp->shm_clist, &current->sysvshm.shm_clist); @@ -643,7 +640,7 @@ no_id: user_shm_unlock(size, shp->mlock_user); fput(file); no_file: - ipc_rcu_putref(&shp->shm_perm, shm_rcu_free); + call_rcu(&shp->shm_perm.rcu, shm_rcu_free); return error; } -- cgit v1.2.3-59-g8ed1b From 51c23b7b7db52493d4fc869cec8c3e8fe27bfcd3 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Wed, 12 Jul 2017 14:35:19 -0700 Subject: ipc/msg.c: avoid ipc_rcu_putref for failed ipc_addid() Loosely based on a patch from Kees Cook : - id and retval can be merged - if ipc_addid() fails, then use call_rcu() directly. The difference is that call_rcu is used for failed ipc_addid() calls, to continue to guarantee an rcu delay for security_msg_queue_free().
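Condensed, the pattern shared by these three patches looks as follows (a kernel-style fragment with illustrative names, not code from the patches): once the security blob has been attached, even the failure path of ipc_addid() must defer the free by a full RCU grace period:

-------------------------------------------------------
static void obj_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct obj *o = container_of(p, struct obj, perm);	/* illustrative type */

	security_obj_free(o);	/* illustrative LSM hook */
	kvfree(o);
}

	/* ... in the creation path, after the security blob was allocated ... */
	retval = ipc_addid(ids, &o->perm, limit);
	if (retval < 0) {
		/* security blob exists: no direct kvfree() allowed here */
		call_rcu(&o->perm.rcu, obj_rcu_free);
		return retval;
	}
-------------------------------------------------------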
Link: http://lkml.kernel.org/r/20170525185107.12869-16-manfred@colorfullife.com Signed-off-by: Manfred Spraul Cc: Kees Cook Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/msg.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ipc/msg.c b/ipc/msg.c index 10094a731b8e..cd90bfde89a4 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -132,7 +132,7 @@ static struct msg_queue *msg_alloc(void) static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; - int id, retval; + int retval; key_t key = params->key; int msgflg = params->flg; @@ -160,10 +160,10 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) INIT_LIST_HEAD(&msq->q_senders); /* ipc_addid() locks msq upon success. */ - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); - if (id < 0) { - ipc_rcu_putref(&msq->q_perm, msg_rcu_free); - return id; + retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); + if (retval < 0) { + call_rcu(&msq->q_perm.rcu, msg_rcu_free); + return retval; } ipc_unlock_object(&msq->q_perm); -- cgit v1.2.3-59-g8ed1b From 3d3653f9732c73feb8c4addfc1cbdaa292a399fa Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:22 -0700 Subject: ipc: move atomic_set() to where it is needed Only after ipc_addid() has succeeded will refcounting be used, so move initialization into ipc_addid() and remove from open-coded *_alloc() routines. Link: http://lkml.kernel.org/r/20170525185107.12869-17-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/msg.c | 2 -- ipc/sem.c | 1 - ipc/shm.c | 2 -- ipc/util.c | 1 + 4 files changed, 1 insertion(+), 5 deletions(-) diff --git a/ipc/msg.c b/ipc/msg.c index cd90bfde89a4..770342e1d327 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -117,8 +117,6 @@ static struct msg_queue *msg_alloc(void) if (unlikely(!msq)) return NULL; - atomic_set(&msq->q_perm.refcount, 1); - return msq; } diff --git a/ipc/sem.c b/ipc/sem.c index 2b2ed56e0fde..5f137738819d 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -465,7 +465,6 @@ static struct sem_array *sem_alloc(size_t nsems) return NULL; memset(sma, 0, size); - atomic_set(&sma->sem_perm.refcount, 1); return sma; } diff --git a/ipc/shm.c b/ipc/shm.c index c5976d318ed1..d1988ef821a1 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -526,8 +526,6 @@ static struct shmid_kernel *shm_alloc(void) if (unlikely(!shp)) return NULL; - atomic_set(&shp->shm_perm.refcount, 1); - return shp; } diff --git a/ipc/util.c b/ipc/util.c index 2428dd44ca97..1a2cb02467ab 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -232,6 +232,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) idr_preload(GFP_KERNEL); + atomic_set(&new->refcount, 1); spin_lock_init(&new->lock); new->deleted = false; rcu_read_lock(); -- cgit v1.2.3-59-g8ed1b From 42e618f77dabc82c697915c193d729e9d16e2a75 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:25 -0700 Subject: ipc/shm: remove special shm_alloc/free There is nothing special about the shm_alloc/free routines any more, so remove them to make code more readable. 
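[ed: the free side that survives, shm_rcu_free() in the diff below, recovers the full object from the rcu_head with two container_of() steps. An annotated sketch of that shape, for reference:]
-------------------------------------------------------
static void shm_rcu_free_sketch(struct rcu_head *head)
{
	/* hop 1: the rcu_head is embedded in struct kern_ipc_perm */
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
						 rcu);
	/* hop 2: shm_perm is the first member of struct shmid_kernel */
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
						shm_perm);

	security_shm_free(shp);	/* release the security blob first */
	kvfree(shp);		/* then the object, kmalloc or vmalloc backed */
}
-------------------------------------------------------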
[manfred@colorfullife.com: Rediff, to continue to keep rcu for free calls after a successful security_shm_alloc()] Link: http://lkml.kernel.org/r/20170525185107.12869-18-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/ipc/shm.c b/ipc/shm.c index d1988ef821a1..28a444861a8f 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -172,11 +172,6 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) ipc_lock_object(&ipcp->shm_perm); } -static void __shm_free(struct shmid_kernel *shp) -{ - kvfree(shp); -} - static void shm_rcu_free(struct rcu_head *head) { struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm, @@ -184,7 +179,7 @@ static void shm_rcu_free(struct rcu_head *head) struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel, shm_perm); security_shm_free(shp); - __shm_free(shp); + kvfree(shp); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) @@ -518,17 +513,6 @@ static const struct vm_operations_struct shm_vm_ops = { #endif }; -static struct shmid_kernel *shm_alloc(void) -{ - struct shmid_kernel *shp; - - shp = kvmalloc(sizeof(*shp), GFP_KERNEL); - if (unlikely(!shp)) - return NULL; - - return shp; -} - /** * newseg - Create a new shared memory segment * @ns: namespace @@ -558,8 +542,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; - shp = shm_alloc(); - if (!shp) + shp = kvmalloc(sizeof(*shp), GFP_KERNEL); + if (unlikely(!shp)) return -ENOMEM; shp->shm_perm.key = key; @@ -569,7 +553,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { - __shm_free(shp); + kvfree(shp); return error; } -- cgit v1.2.3-59-g8ed1b From fb259c310f79d295c2da2934ff2282e1b7c30529 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:28 -0700 Subject: ipc/msg: remove special msg_alloc/free There is nothing special about the msg_alloc/free routines any more, so remove them to make code more readable. 
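[ed: the wrappers can go because kvmalloc()/kvfree() already encapsulate the interesting behaviour. A simplified sketch of the fallback logic, matching the shape of the open-coded copy removed from x_tables later in this series; the real mm/util.c version additionally tunes the gfp flags by allocation size and handles NUMA nodes:]
-------------------------------------------------------
static void *kvmalloc_sketch(size_t size, gfp_t flags)
{
	/*
	 * Try physically contiguous slab memory first, without warning
	 * or retrying hard on failure.
	 */
	void *p = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);

	if (p)
		return p;
	/* fall back to virtually contiguous memory */
	return __vmalloc(size, flags, PAGE_KERNEL);
}
-------------------------------------------------------
kvfree() completes the pair: it checks is_vmalloc_addr() and dispatches to vfree() or kfree() accordingly, so callers need not remember which allocator satisfied the request.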
[manfred@colorfullife.com: Rediff to keep rcu protection for security_msg_queue_alloc()] Link: http://lkml.kernel.org/r/20170525185107.12869-19-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/msg.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/ipc/msg.c b/ipc/msg.c index 770342e1d327..5b25e0755656 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -95,29 +95,13 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) ipc_rmid(&msg_ids(ns), &s->q_perm); } -static void __msg_free(struct msg_queue *msq) -{ - kvfree(msq); -} - static void msg_rcu_free(struct rcu_head *head) { struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); struct msg_queue *msq = container_of(p, struct msg_queue, q_perm); security_msg_queue_free(msq); - __msg_free(msq); -} - -static struct msg_queue *msg_alloc(void) -{ - struct msg_queue *msq; - - msq = kvmalloc(sizeof(*msq), GFP_KERNEL); - if (unlikely(!msq)) - return NULL; - - return msq; + kvfree(msq); } /** @@ -134,8 +118,8 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) key_t key = params->key; int msgflg = params->flg; - msq = msg_alloc(); - if (!msq) + msq = kvmalloc(sizeof(*msq), GFP_KERNEL); + if (unlikely(!msq)) return -ENOMEM; msq->q_perm.mode = msgflg & S_IRWXUGO; @@ -144,7 +128,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { - __msg_free(msq); + kvfree(msq); return retval; } -- cgit v1.2.3-59-g8ed1b From e2029dfeef7b09f08ac8572e8be3d4c624d1f79a Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:31 -0700 Subject: ipc/sem: drop __sem_free() The remaining users of __sem_free() can simply call kvfree() instead for better readability. [manfred@colorfullife.com: Rediff to keep rcu protection for security_sem_alloc()] Link: http://lkml.kernel.org/r/20170525185107.12869-20-manfred@colorfullife.com Signed-off-by: Kees Cook Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/sem.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/ipc/sem.c b/ipc/sem.c index 5f137738819d..9e70cd7a17da 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -258,18 +258,13 @@ static void merge_queues(struct sem_array *sma) } } -static void __sem_free(struct sem_array *sma) -{ - kvfree(sma); -} - static void sem_rcu_free(struct rcu_head *head) { struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); struct sem_array *sma = container_of(p, struct sem_array, sem_perm); security_sem_free(sma); - __sem_free(sma); + kvfree(sma); } /* @@ -500,7 +495,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); if (retval) { - __sem_free(sma); + kvfree(sma); return retval; } -- cgit v1.2.3-59-g8ed1b From 62b49c9908bcee88347efe8b4ed1b4f53c60ee66 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Wed, 12 Jul 2017 14:35:34 -0700 Subject: ipc/util.h: update documentation for ipc_getref() and ipc_putref() Now that ipc_rcu_alloc() and ipc_rcu_free() are removed, document when it is valid to use ipc_getref() and ipc_putref(). 
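[ed: the functions in question are ipc_rcu_getref() and ipc_rcu_putref(). A usage sketch of the rule being documented, with hypothetical caller code and my_rcu_free() standing in for the per-subsystem callback such as sem_rcu_free():]
-------------------------------------------------------
/* Before ipc_addid(): the refcount is still uninitialized. */
call_rcu(&new->rcu, my_rcu_free);		/* correct teardown */
/* ipc_rcu_putref(new, my_rcu_free) here would be a bug. */

/* After a successful ipc_addid(): the object is reference counted. */
if (ipc_rcu_getref(perm)) {			/* fails once the count hit 0 */
	/* ... use the object ... */
	ipc_rcu_putref(perm, my_rcu_free);	/* count 1 -> 0 queues the RCU free */
}
-------------------------------------------------------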
Link: http://lkml.kernel.org/r/20170525185107.12869-21-manfred@colorfullife.com Signed-off-by: Manfred Spraul Cc: Davidlohr Bueso Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/util.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ipc/util.h b/ipc/util.h index 77336c2bdb9c..c692010e6f0a 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -112,6 +112,9 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); * Objects are reference counted, they start with reference count 1. * getref increases the refcount, the putref call that reduces the recount * to 0 schedules the rcu destruction. Caller must guarantee locking. + * + * refcount is initialized by ipc_addid(), before that point call_rcu() + * must be used. */ int ipc_rcu_getref(struct kern_ipc_perm *ptr); void ipc_rcu_putref(struct kern_ipc_perm *ptr, -- cgit v1.2.3-59-g8ed1b From eacd86ca3b036e55e172b7279f101cef4a6ff3a4 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 12 Jul 2017 14:35:37 -0700 Subject: net/netfilter/x_tables.c: use kvmalloc() in xt_alloc_table_info() xt_alloc_table_info() basically opencodes kvmalloc() so use the library function instead. Link: http://lkml.kernel.org/r/20170531155145.17111-4-mhocko@kernel.org Signed-off-by: Michal Hocko Cc: Pablo Neira Ayuso Cc: Jozsef Kadlecsik Cc: Florian Westphal Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- net/netfilter/x_tables.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 1770c1d9b37f..e1648238a9c9 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1003,14 +1003,10 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size) if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages) return NULL; - if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) - info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - if (!info) { - info = __vmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, - PAGE_KERNEL); - if (!info) - return NULL; - } + info = kvmalloc(sz, GFP_KERNEL); + if (!info) + return NULL; + memset(info, 0, sizeof(*info)); info->size = size; return info; -- cgit v1.2.3-59-g8ed1b From 24bb44612c5f93a1dff1f7e71b7b7b109a988791 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 12 Jul 2017 14:35:40 -0700 Subject: kernel/watchdog: remove unused declaration Patch series "Improve watchdog config for arch watchdogs", v4. A series to make the hardlockup watchdog more easily replaceable by arch code. The last patch provides some justification for why we want to do this (existing sparc watchdog is another that could benefit). This patch (of 5): Remove unused declaration. 
Link: http://lkml.kernel.org/r/20170616065715.18390-2-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Don Zickus Reviewed-by: Babu Moger Tested-by: Babu Moger [sparc] Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/nmi.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index aa3cd0878270..5e2e57536d98 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -12,9 +12,6 @@ extern void touch_softlockup_watchdog_sched(void); extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); -extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; extern unsigned int hardlockup_panic; void lockup_detector_init(void); -- cgit v1.2.3-59-g8ed1b From f2e0cff85ed111a3cf24d894c3fa11697dfae628 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 12 Jul 2017 14:35:43 -0700 Subject: kernel/watchdog: introduce arch_touch_nmi_watchdog() For architectures that define HAVE_NMI_WATCHDOG, instead of having them provide the complete touch_nmi_watchdog() function, just have them provide arch_touch_nmi_watchdog(). This gives the generic code more flexibility in implementing this function, and arch implementations don't miss out on touching the softlockup watchdog or other generic details. Link: http://lkml.kernel.org/r/20170616065715.18390-3-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Don Zickus Reviewed-by: Babu Moger Tested-by: Babu Moger [sparc] Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/blackfin/include/asm/nmi.h | 2 ++ arch/blackfin/kernel/nmi.c | 2 +- arch/mn10300/include/asm/nmi.h | 2 ++ arch/mn10300/kernel/mn10300-watchdog-low.S | 8 ++++---- arch/mn10300/kernel/mn10300-watchdog.c | 2 +- arch/sparc/include/asm/nmi.h | 1 + arch/sparc/kernel/nmi.c | 6 ++---- include/linux/nmi.h | 27 ++++++++++++++++----------- kernel/watchdog_hld.c | 5 ++--- 9 files changed, 31 insertions(+), 24 deletions(-) diff --git a/arch/blackfin/include/asm/nmi.h b/arch/blackfin/include/asm/nmi.h index b9caac4fcfd8..107d23705f46 100644 --- a/arch/blackfin/include/asm/nmi.h +++ b/arch/blackfin/include/asm/nmi.h @@ -9,4 +9,6 @@ #include +extern void arch_touch_nmi_watchdog(void); + #endif diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c index 633c37083e87..1e714329fe8a 100644 --- a/arch/blackfin/kernel/nmi.c +++ b/arch/blackfin/kernel/nmi.c @@ -190,7 +190,7 @@ static int __init init_nmi_wdt(void) } device_initcall(init_nmi_wdt); -void touch_nmi_watchdog(void) +void arch_touch_nmi_watchdog(void) { atomic_set(&nmi_touched[smp_processor_id()], 1); } diff --git a/arch/mn10300/include/asm/nmi.h b/arch/mn10300/include/asm/nmi.h index f3671cbbc117..b05627597b1b 100644 --- a/arch/mn10300/include/asm/nmi.h +++ b/arch/mn10300/include/asm/nmi.h @@ -11,4 +11,6 @@ #ifndef _ASM_NMI_H #define _ASM_NMI_H +extern void arch_touch_nmi_watchdog(void); + #endif /* _ASM_NMI_H */ diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S index f2f5c9cfaabd..34f8773de7d0 100644 --- a/arch/mn10300/kernel/mn10300-watchdog-low.S +++ b/arch/mn10300/kernel/mn10300-watchdog-low.S @@ -50,9 +50,9 @@ watchdog_handler: # we can't inline it) # 
############################################################################### - .globl touch_nmi_watchdog - .type touch_nmi_watchdog,@function -touch_nmi_watchdog: + .globl arch_touch_nmi_watchdog + .type arch_touch_nmi_watchdog,@function +arch_touch_nmi_watchdog: clr d0 clr d1 mov watchdog_alert_counter, a0 @@ -63,4 +63,4 @@ touch_nmi_watchdog: lne ret [],0 - .size touch_nmi_watchdog,.-touch_nmi_watchdog + .size arch_touch_nmi_watchdog,.-arch_touch_nmi_watchdog diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c index a2d8e6938d67..0d5641beadf5 100644 --- a/arch/mn10300/kernel/mn10300-watchdog.c +++ b/arch/mn10300/kernel/mn10300-watchdog.c @@ -31,7 +31,7 @@ static unsigned int watchdog; static unsigned int watchdog_hz = 1; unsigned int watchdog_alert_counter[NR_CPUS]; -EXPORT_SYMBOL(touch_nmi_watchdog); +EXPORT_SYMBOL(arch_touch_nmi_watchdog); /* * the best way to detect whether a CPU has a 'hard lockup' problem diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h index 26ad2b2607c6..284eac3ffaf2 100644 --- a/arch/sparc/include/asm/nmi.h +++ b/arch/sparc/include/asm/nmi.h @@ -7,6 +7,7 @@ void nmi_adjust_hz(unsigned int new_hz); extern atomic_t nmi_active; +void arch_touch_nmi_watchdog(void); void start_nmi_watchdog(void *unused); void stop_nmi_watchdog(void *unused); diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 95e73c63c99d..048ad783ea3f 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -51,7 +51,7 @@ static DEFINE_PER_CPU(unsigned int, last_irq_sum); static DEFINE_PER_CPU(long, alert_counter); static DEFINE_PER_CPU(int, nmi_touch); -void touch_nmi_watchdog(void) +void arch_touch_nmi_watchdog(void) { if (atomic_read(&nmi_active)) { int cpu; @@ -61,10 +61,8 @@ void touch_nmi_watchdog(void) per_cpu(nmi_touch, cpu) = 1; } } - - touch_softlockup_watchdog(); } -EXPORT_SYMBOL(touch_nmi_watchdog); +EXPORT_SYMBOL(arch_touch_nmi_watchdog); static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) { diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 5e2e57536d98..bd387ef8bccd 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -6,6 +6,9 @@ #include #include +#if defined(CONFIG_HAVE_NMI_WATCHDOG) +#include +#endif #ifdef CONFIG_LOCKUP_DETECTOR extern void touch_softlockup_watchdog_sched(void); @@ -58,6 +61,18 @@ static inline void reset_hung_task_detector(void) #define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) #define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) +#if defined(CONFIG_HARDLOCKUP_DETECTOR) +extern void hardlockup_detector_disable(void); +#else +static inline void hardlockup_detector_disable(void) {} +#endif + +#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) +extern void arch_touch_nmi_watchdog(void); +#else +static inline void arch_touch_nmi_watchdog(void) {} +#endif + /** * touch_nmi_watchdog - restart NMI watchdog timeout. * @@ -65,21 +80,11 @@ static inline void reset_hung_task_detector(void) * may be used to reset the timeout - for code which intentionally * disables interrupts for a long time. This call is stateless. 
*/ -#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) -#include -extern void touch_nmi_watchdog(void); -#else static inline void touch_nmi_watchdog(void) { + arch_touch_nmi_watchdog(); touch_softlockup_watchdog(); } -#endif - -#if defined(CONFIG_HARDLOCKUP_DETECTOR) -extern void hardlockup_detector_disable(void); -#else -static inline void hardlockup_detector_disable(void) {} -#endif /* * Create trigger_all_cpu_backtrace() out of the arch-provided diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 54a427d1f344..90d688df6ce1 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -56,7 +56,7 @@ static int __init hardlockup_panic_setup(char *str) } __setup("nmi_watchdog=", hardlockup_panic_setup); -void touch_nmi_watchdog(void) +void arch_touch_nmi_watchdog(void) { /* * Using __raw here because some code paths have @@ -66,9 +66,8 @@ void touch_nmi_watchdog(void) * going off. */ raw_cpu_write(watchdog_nmi_touch, true); - touch_softlockup_watchdog(); } -EXPORT_SYMBOL(touch_nmi_watchdog); +EXPORT_SYMBOL(arch_touch_nmi_watchdog); static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, -- cgit v1.2.3-59-g8ed1b From 05a4a95279311c3a4633b4277a5d21cfd616c6c7 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 12 Jul 2017 14:35:46 -0700 Subject: kernel/watchdog: split up config options Split SOFTLOCKUP_DETECTOR from LOCKUP_DETECTOR, and split HARDLOCKUP_DETECTOR_PERF from HARDLOCKUP_DETECTOR. LOCKUP_DETECTOR implies the general boot, sysctl, and programming interfaces for the lockup detectors. An architecture that wants to use a hard lockup detector must define HAVE_HARDLOCKUP_DETECTOR_PERF or HAVE_HARDLOCKUP_DETECTOR_ARCH. Alternatively an arch can define HAVE_NMI_WATCHDOG, which provides the minimum arch_touch_nmi_watchdog, and it otherwise does its own thing and does not implement the LOCKUP_DETECTOR interfaces. sparc is unusual in that it has started to implement some of the interfaces, but not fully yet. It should probably be converted to a full HAVE_HARDLOCKUP_DETECTOR_ARCH. [npiggin@gmail.com: fix] Link: http://lkml.kernel.org/r/20170617223522.66c0ad88@roar.ozlabs.ibm.com Link: http://lkml.kernel.org/r/20170616065715.18390-4-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Don Zickus Reviewed-by: Babu Moger Tested-by: Babu Moger [sparc] Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/Kconfig | 25 ++++- arch/powerpc/Kconfig | 1 + arch/powerpc/kernel/setup_64.c | 2 +- arch/x86/Kconfig | 1 + arch/x86/kernel/apic/hw_nmi.c | 2 +- include/linux/nmi.h | 29 +++-- kernel/Makefile | 2 +- kernel/sysctl.c | 31 +++--- kernel/watchdog.c | 243 +++++++++++++++++++++++++++-------------- kernel/watchdog_hld.c | 32 ------ lib/Kconfig.debug | 45 +++++--- 11 files changed, 251 insertions(+), 162 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index cae0958a2298..fb9bd7d36b05 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -198,9 +198,6 @@ config HAVE_KPROBES_ON_FTRACE config HAVE_NMI bool -config HAVE_NMI_WATCHDOG - depends on HAVE_NMI - bool # # An arch should select this if it provides all these things: # @@ -288,6 +285,28 @@ config HAVE_PERF_EVENTS_NMI subsystem. Also has support for calculating CPU cycle events to determine how many clock cycles in a given period. +config HAVE_HARDLOCKUP_DETECTOR_PERF + bool + depends on HAVE_PERF_EVENTS_NMI + help + The arch chooses to use the generic perf-NMI-based hardlockup + detector. 
Must define HAVE_PERF_EVENTS_NMI. + +config HAVE_NMI_WATCHDOG + depends on HAVE_NMI + bool + help + The arch provides a low level NMI watchdog. It provides + asm/nmi.h, and defines its own arch_touch_nmi_watchdog(). + +config HAVE_HARDLOCKUP_DETECTOR_ARCH + bool + select HAVE_NMI_WATCHDOG + help + The arch chooses to provide its own hardlockup detector, which is + a superset of the HAVE_NMI_WATCHDOG. It also conforms to config + interfaces and parameters provided by hardlockup detector subsystem. + config HAVE_PERF_REGS bool help diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 7177a3f4f418..63ed758e1d20 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -197,6 +197,7 @@ config PPC select HAVE_OPTPROBES if PPC64 select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI if PPC64 + select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if SMP diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4640f6d64f8b..074a075a9cdb 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -752,7 +752,7 @@ struct ppc_pci_io ppc_pci_io; EXPORT_SYMBOL(ppc_pci_io); #endif -#ifdef CONFIG_HARDLOCKUP_DETECTOR +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF u64 hw_nmi_get_sample_period(int watchdog_thresh) { return ppc_proc_freq * watchdog_thresh; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 94a18681353d..3d2b8ce54e00 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -162,6 +162,7 @@ config X86 select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI + select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index c73c9fb281e1..d6f387780849 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -19,7 +19,7 @@ #include #include -#ifdef CONFIG_HARDLOCKUP_DETECTOR +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF u64 hw_nmi_get_sample_period(int watchdog_thresh) { return (u64)(cpu_khz) * 1000 * watchdog_thresh; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index bd387ef8bccd..8aa01fd859fb 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -11,13 +11,21 @@ #endif #ifdef CONFIG_LOCKUP_DETECTOR +void lockup_detector_init(void); +#else +static inline void lockup_detector_init(void) +{ +} +#endif + +#ifdef CONFIG_SOFTLOCKUP_DETECTOR extern void touch_softlockup_watchdog_sched(void); extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); extern unsigned int softlockup_panic; -extern unsigned int hardlockup_panic; -void lockup_detector_init(void); +extern int soft_watchdog_enabled; +extern atomic_t watchdog_park_in_progress; #else static inline void touch_softlockup_watchdog_sched(void) { @@ -31,9 +39,6 @@ static inline void touch_softlockup_watchdog_sync(void) static inline void touch_all_softlockup_watchdogs(void) { } -static inline void lockup_detector_init(void) -{ -} #endif #ifdef CONFIG_DETECT_HUNG_TASK @@ -63,15 +68,18 @@ static inline void reset_hung_task_detector(void) #if defined(CONFIG_HARDLOCKUP_DETECTOR) extern void hardlockup_detector_disable(void); +extern unsigned int hardlockup_panic; #else static inline void hardlockup_detector_disable(void) {} #endif -#if defined(CONFIG_HARDLOCKUP_DETECTOR) || 
defined(CONFIG_HAVE_NMI_WATCHDOG) +#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) extern void arch_touch_nmi_watchdog(void); #else +#if !defined(CONFIG_HAVE_NMI_WATCHDOG) static inline void arch_touch_nmi_watchdog(void) {} #endif +#endif /** * touch_nmi_watchdog - restart NMI watchdog timeout. @@ -141,15 +149,18 @@ static inline bool trigger_single_cpu_backtrace(int cpu) } #endif -#ifdef CONFIG_LOCKUP_DETECTOR +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF u64 hw_nmi_get_sample_period(int watchdog_thresh); +#endif + +#ifdef CONFIG_LOCKUP_DETECTOR extern int nmi_watchdog_enabled; -extern int soft_watchdog_enabled; extern int watchdog_user_enabled; extern int watchdog_thresh; extern unsigned long watchdog_enabled; +extern struct cpumask watchdog_cpumask; extern unsigned long *watchdog_cpumask_bits; -extern atomic_t watchdog_park_in_progress; +extern int __read_mostly watchdog_suspended; #ifdef CONFIG_SMP extern int sysctl_softlockup_all_cpu_backtrace; extern int sysctl_hardlockup_all_cpu_backtrace; diff --git a/kernel/Makefile b/kernel/Makefile index 72aa080f91f0..4cb8e8b23c6e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -82,7 +82,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += debug/ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o -obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o +obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o diff --git a/kernel/sysctl.c b/kernel/sysctl.c index df9f2a367882..6648fbbb8157 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -900,6 +900,14 @@ static struct ctl_table kern_table[] = { .extra2 = &zero, #endif }, + { + .procname = "watchdog_cpumask", + .data = &watchdog_cpumask_bits, + .maxlen = NR_CPUS, + .mode = 0644, + .proc_handler = proc_watchdog_cpumask, + }, +#ifdef CONFIG_SOFTLOCKUP_DETECTOR { .procname = "soft_watchdog", .data = &soft_watchdog_enabled, @@ -909,13 +917,6 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one, }, - { - .procname = "watchdog_cpumask", - .data = &watchdog_cpumask_bits, - .maxlen = NR_CPUS, - .mode = 0644, - .proc_handler = proc_watchdog_cpumask, - }, { .procname = "softlockup_panic", .data = &softlockup_panic, @@ -925,27 +926,29 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one, }, -#ifdef CONFIG_HARDLOCKUP_DETECTOR +#ifdef CONFIG_SMP { - .procname = "hardlockup_panic", - .data = &hardlockup_panic, + .procname = "softlockup_all_cpu_backtrace", + .data = &sysctl_softlockup_all_cpu_backtrace, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, +#endif /* CONFIG_SMP */ #endif -#ifdef CONFIG_SMP +#ifdef CONFIG_HARDLOCKUP_DETECTOR { - .procname = "softlockup_all_cpu_backtrace", - .data = &sysctl_softlockup_all_cpu_backtrace, + .procname = "hardlockup_panic", + .data = &hardlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, +#ifdef CONFIG_SMP { .procname = "hardlockup_all_cpu_backtrace", .data = &sysctl_hardlockup_all_cpu_backtrace, @@ -957,6 +960,8 @@ static struct ctl_table kern_table[] = { }, #endif /* CONFIG_SMP */ #endif +#endif + #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) { .procname = "unknown_nmi_panic", diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 03e0b69bb5bf..1fba9c3d66dc 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -29,15 +29,58 
@@ #include #include +/* Watchdog configuration */ static DEFINE_MUTEX(watchdog_proc_mutex); -#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) -unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; +int __read_mostly nmi_watchdog_enabled; + +#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) +unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED | + NMI_WATCHDOG_ENABLED; #else unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; #endif -int __read_mostly nmi_watchdog_enabled; + +#ifdef CONFIG_HARDLOCKUP_DETECTOR +/* boot commands */ +/* + * Should we panic when a soft-lockup or hard-lockup occurs: + */ +unsigned int __read_mostly hardlockup_panic = + CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; +/* + * We may not want to enable hard lockup detection by default in all cases, + * for example when running the kernel as a guest on a hypervisor. In these + * cases this function can be called to disable hard lockup detection. This + * function should only be executed once by the boot processor before the + * kernel command line parameters are parsed, because otherwise it is not + * possible to override this in hardlockup_panic_setup(). + */ +void hardlockup_detector_disable(void) +{ + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; +} + +static int __init hardlockup_panic_setup(char *str) +{ + if (!strncmp(str, "panic", 5)) + hardlockup_panic = 1; + else if (!strncmp(str, "nopanic", 7)) + hardlockup_panic = 0; + else if (!strncmp(str, "0", 1)) + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; + else if (!strncmp(str, "1", 1)) + watchdog_enabled |= NMI_WATCHDOG_ENABLED; + return 1; +} +__setup("nmi_watchdog=", hardlockup_panic_setup); + +#endif + +#ifdef CONFIG_SOFTLOCKUP_DETECTOR int __read_mostly soft_watchdog_enabled; +#endif + int __read_mostly watchdog_user_enabled; int __read_mostly watchdog_thresh = 10; @@ -45,15 +88,9 @@ int __read_mostly watchdog_thresh = 10; int __read_mostly sysctl_softlockup_all_cpu_backtrace; int __read_mostly sysctl_hardlockup_all_cpu_backtrace; #endif -static struct cpumask watchdog_cpumask __read_mostly; +struct cpumask watchdog_cpumask __read_mostly; unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); -/* Helper for online, unparked cpus. */ -#define for_each_watchdog_cpu(cpu) \ - for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) - -atomic_t watchdog_park_in_progress = ATOMIC_INIT(0); - /* * The 'watchdog_running' variable is set to 1 when the watchdog threads * are registered/started and is set to 0 when the watchdog threads are @@ -72,7 +109,27 @@ static int __read_mostly watchdog_running; * of 'watchdog_running' cannot change while the watchdog is deactivated * temporarily (see related code in 'proc' handlers). */ -static int __read_mostly watchdog_suspended; +int __read_mostly watchdog_suspended; + +/* + * These functions can be overridden if an architecture implements its + * own hardlockup detector. + */ +int __weak watchdog_nmi_enable(unsigned int cpu) +{ + return 0; +} +void __weak watchdog_nmi_disable(unsigned int cpu) +{ +} + +#ifdef CONFIG_SOFTLOCKUP_DETECTOR + +/* Helper for online, unparked cpus. 
*/ +#define for_each_watchdog_cpu(cpu) \ + for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) + +atomic_t watchdog_park_in_progress = ATOMIC_INIT(0); static u64 __read_mostly sample_period; @@ -120,6 +177,7 @@ static int __init softlockup_all_cpu_backtrace_setup(char *str) return 1; } __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup); +#ifdef CONFIG_HARDLOCKUP_DETECTOR static int __init hardlockup_all_cpu_backtrace_setup(char *str) { sysctl_hardlockup_all_cpu_backtrace = @@ -128,6 +186,7 @@ static int __init hardlockup_all_cpu_backtrace_setup(char *str) } __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup); #endif +#endif /* * Hard-lockup warnings should be triggered after just a few seconds. Soft- @@ -213,18 +272,6 @@ void touch_softlockup_watchdog_sync(void) __this_cpu_write(watchdog_touch_ts, 0); } -/* watchdog detector functions */ -bool is_hardlockup(void) -{ - unsigned long hrint = __this_cpu_read(hrtimer_interrupts); - - if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) - return true; - - __this_cpu_write(hrtimer_interrupts_saved, hrint); - return false; -} - static int is_softlockup(unsigned long touch_ts) { unsigned long now = get_timestamp(); @@ -237,21 +284,21 @@ static int is_softlockup(unsigned long touch_ts) return 0; } -static void watchdog_interrupt_count(void) +/* watchdog detector functions */ +bool is_hardlockup(void) { - __this_cpu_inc(hrtimer_interrupts); -} + unsigned long hrint = __this_cpu_read(hrtimer_interrupts); -/* - * These two functions are mostly architecture specific - * defining them as weak here. - */ -int __weak watchdog_nmi_enable(unsigned int cpu) -{ - return 0; + if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) + return true; + + __this_cpu_write(hrtimer_interrupts_saved, hrint); + return false; } -void __weak watchdog_nmi_disable(unsigned int cpu) + +static void watchdog_interrupt_count(void) { + __this_cpu_inc(hrtimer_interrupts); } static int watchdog_enable_all_cpus(void); @@ -502,57 +549,6 @@ static void watchdog_unpark_threads(void) kthread_unpark(per_cpu(softlockup_watchdog, cpu)); } -/* - * Suspend the hard and soft lockup detector by parking the watchdog threads. - */ -int lockup_detector_suspend(void) -{ - int ret = 0; - - get_online_cpus(); - mutex_lock(&watchdog_proc_mutex); - /* - * Multiple suspend requests can be active in parallel (counted by - * the 'watchdog_suspended' variable). If the watchdog threads are - * running, the first caller takes care that they will be parked. - * The state of 'watchdog_running' cannot change while a suspend - * request is active (see related code in 'proc' handlers). - */ - if (watchdog_running && !watchdog_suspended) - ret = watchdog_park_threads(); - - if (ret == 0) - watchdog_suspended++; - else { - watchdog_disable_all_cpus(); - pr_err("Failed to suspend lockup detectors, disabled\n"); - watchdog_enabled = 0; - } - - mutex_unlock(&watchdog_proc_mutex); - - return ret; -} - -/* - * Resume the hard and soft lockup detector by unparking the watchdog threads. - */ -void lockup_detector_resume(void) -{ - mutex_lock(&watchdog_proc_mutex); - - watchdog_suspended--; - /* - * The watchdog threads are unparked if they were previously running - * and if there is no more active suspend request. 
- */ - if (watchdog_running && !watchdog_suspended) - watchdog_unpark_threads(); - - mutex_unlock(&watchdog_proc_mutex); - put_online_cpus(); -} - static int update_watchdog_all_cpus(void) { int ret; @@ -604,6 +600,81 @@ static void watchdog_disable_all_cpus(void) } } +#else /* SOFTLOCKUP */ +static int watchdog_park_threads(void) +{ + return 0; +} + +static void watchdog_unpark_threads(void) +{ +} + +static int watchdog_enable_all_cpus(void) +{ + return 0; +} + +static void watchdog_disable_all_cpus(void) +{ +} + +static void set_sample_period(void) +{ +} +#endif /* SOFTLOCKUP */ + +/* + * Suspend the hard and soft lockup detector by parking the watchdog threads. + */ +int lockup_detector_suspend(void) +{ + int ret = 0; + + get_online_cpus(); + mutex_lock(&watchdog_proc_mutex); + /* + * Multiple suspend requests can be active in parallel (counted by + * the 'watchdog_suspended' variable). If the watchdog threads are + * running, the first caller takes care that they will be parked. + * The state of 'watchdog_running' cannot change while a suspend + * request is active (see related code in 'proc' handlers). + */ + if (watchdog_running && !watchdog_suspended) + ret = watchdog_park_threads(); + + if (ret == 0) + watchdog_suspended++; + else { + watchdog_disable_all_cpus(); + pr_err("Failed to suspend lockup detectors, disabled\n"); + watchdog_enabled = 0; + } + + mutex_unlock(&watchdog_proc_mutex); + + return ret; +} + +/* + * Resume the hard and soft lockup detector by unparking the watchdog threads. + */ +void lockup_detector_resume(void) +{ + mutex_lock(&watchdog_proc_mutex); + + watchdog_suspended--; + /* + * The watchdog threads are unparked if they were previously running + * and if there is no more active suspend request. + */ + if (watchdog_running && !watchdog_suspended) + watchdog_unpark_threads(); + + mutex_unlock(&watchdog_proc_mutex); + put_online_cpus(); +} + #ifdef CONFIG_SYSCTL /* @@ -810,9 +881,11 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write, * a temporary cpumask, so we are likely not in a * position to do much else to make things better. */ +#ifdef CONFIG_SOFTLOCKUP_DETECTOR if (smpboot_update_cpumask_percpu_thread( &watchdog_threads, &watchdog_cpumask) != 0) pr_err("cpumask update failed\n"); +#endif } } out: diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 90d688df6ce1..295a0d84934c 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -22,39 +22,7 @@ static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); -/* boot commands */ -/* - * Should we panic when a soft-lockup or hard-lockup occurs: - */ -unsigned int __read_mostly hardlockup_panic = - CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; static unsigned long hardlockup_allcpu_dumped; -/* - * We may not want to enable hard lockup detection by default in all cases, - * for example when running the kernel as a guest on a hypervisor. In these - * cases this function can be called to disable hard lockup detection. This - * function should only be executed once by the boot processor before the - * kernel command line parameters are parsed, because otherwise it is not - * possible to override this in hardlockup_panic_setup(). 
- */ -void hardlockup_detector_disable(void) -{ - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; -} - -static int __init hardlockup_panic_setup(char *str) -{ - if (!strncmp(str, "panic", 5)) - hardlockup_panic = 1; - else if (!strncmp(str, "nopanic", 7)) - hardlockup_panic = 0; - else if (!strncmp(str, "0", 1)) - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; - else if (!strncmp(str, "1", 1)) - watchdog_enabled |= NMI_WATCHDOG_ENABLED; - return 1; -} -__setup("nmi_watchdog=", hardlockup_panic_setup); void arch_touch_nmi_watchdog(void) { diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f28f4252e54a..b0d01c6d4e03 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -778,34 +778,45 @@ config DEBUG_SHIRQ menu "Debug Lockups and Hangs" config LOCKUP_DETECTOR - bool "Detect Hard and Soft Lockups" + bool + +config SOFTLOCKUP_DETECTOR + bool "Detect Soft Lockups" depends on DEBUG_KERNEL && !S390 + select LOCKUP_DETECTOR help Say Y here to enable the kernel to act as a watchdog to detect - hard and soft lockups. + soft lockups. Softlockups are bugs that cause the kernel to loop in kernel mode for more than 20 seconds, without giving other tasks a chance to run. The current stack trace is displayed upon detection and the system will stay locked up. +config HARDLOCKUP_DETECTOR_PERF + bool + select SOFTLOCKUP_DETECTOR + +# +# arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide their own hard +# lockup detector rather than the perf based detector. +# +config HARDLOCKUP_DETECTOR + bool "Detect Hard Lockups" + depends on DEBUG_KERNEL && !S390 + depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_ARCH + select LOCKUP_DETECTOR + select HARDLOCKUP_DETECTOR_PERF if HAVE_HARDLOCKUP_DETECTOR_PERF + select HARDLOCKUP_DETECTOR_ARCH if HAVE_HARDLOCKUP_DETECTOR_ARCH + help + Say Y here to enable the kernel to act as a watchdog to detect + hard lockups. + Hardlockups are bugs that cause the CPU to loop in kernel mode for more than 10 seconds, without letting other interrupts have a chance to run. The current stack trace is displayed upon detection and the system will stay locked up. - The overhead should be minimal. A periodic hrtimer runs to - generate interrupts and kick the watchdog task every 4 seconds. - An NMI is generated every 10 seconds or so to check for hardlockups. - - The frequency of hrtimer and NMI events and the soft and hard lockup - thresholds can be controlled through the sysctl watchdog_thresh. 
- -config HARDLOCKUP_DETECTOR - def_bool y - depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG - depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI - config BOOTPARAM_HARDLOCKUP_PANIC bool "Panic (Reboot) On Hard Lockups" depends on HARDLOCKUP_DETECTOR @@ -826,7 +837,7 @@ config BOOTPARAM_HARDLOCKUP_PANIC_VALUE config BOOTPARAM_SOFTLOCKUP_PANIC bool "Panic (Reboot) On Soft Lockups" - depends on LOCKUP_DETECTOR + depends on SOFTLOCKUP_DETECTOR help Say Y here to enable the kernel to panic on "soft lockups", which are bugs that cause the kernel to loop in kernel @@ -843,7 +854,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE int - depends on LOCKUP_DETECTOR + depends on SOFTLOCKUP_DETECTOR range 0 1 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC default 1 if BOOTPARAM_SOFTLOCKUP_PANIC @@ -851,7 +862,7 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE config DETECT_HUNG_TASK bool "Detect Hung Tasks" depends on DEBUG_KERNEL - default LOCKUP_DETECTOR + default SOFTLOCKUP_DETECTOR help Say Y here to enable the kernel to detect "hung tasks", which are bugs that cause the task to be stuck in -- cgit v1.2.3-59-g8ed1b From a10a842ff81a7e3810817b3b04e4c432b6191e21 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 12 Jul 2017 14:35:49 -0700 Subject: kernel/watchdog: provide watchdog_nmi_reconfigure() for arch watchdogs After reconfiguring watchdog sysctls etc., architecture specific watchdogs may not get all their parameters updated. watchdog_nmi_reconfigure() can be implemented to pull the new values in and set the arch NMI watchdog. [npiggin@gmail.com: add code comments] Link: http://lkml.kernel.org/r/20170617125933.774d3858@roar.ozlabs.ibm.com [arnd@arndb.de: hide unused function] Link: http://lkml.kernel.org/r/20170620204854.966601-1-arnd@arndb.de Link: http://lkml.kernel.org/r/20170616065715.18390-5-npiggin@gmail.com Signed-off-by: Nicholas Piggin Signed-off-by: Arnd Bergmann Reviewed-by: Don Zickus Tested-by: Babu Moger [sparc] Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 48 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 1fba9c3d66dc..cabe3e9fb620 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -114,6 +114,10 @@ int __read_mostly watchdog_suspended; /* * These functions can be overridden if an architecture implements its * own hardlockup detector. + * + * watchdog_nmi_enable/disable can be implemented to start and stop when + * softlockup watchdog threads start and stop. The arch must select the + * SOFTLOCKUP_DETECTOR Kconfig. */ int __weak watchdog_nmi_enable(unsigned int cpu) { @@ -123,6 +127,22 @@ void __weak watchdog_nmi_disable(unsigned int cpu) { } +/* + * watchdog_nmi_reconfigure can be implemented to be notified after any + * watchdog configuration change. The arch hardlockup watchdog should + * respond to the following variables: + * - nmi_watchdog_enabled + * - watchdog_thresh + * - watchdog_cpumask + * - sysctl_hardlockup_all_cpu_backtrace + * - hardlockup_panic + * - watchdog_suspended + */ +void __weak watchdog_nmi_reconfigure(void) +{ +} + + #ifdef CONFIG_SOFTLOCKUP_DETECTOR /* Helper for online, unparked cpus. 
*/ @@ -600,6 +620,14 @@ static void watchdog_disable_all_cpus(void) } } +#ifdef CONFIG_SYSCTL +static int watchdog_update_cpus(void) +{ + return smpboot_update_cpumask_percpu_thread( + &watchdog_threads, &watchdog_cpumask); +} +#endif + #else /* SOFTLOCKUP */ static int watchdog_park_threads(void) { @@ -619,6 +647,13 @@ static void watchdog_disable_all_cpus(void) { } +#ifdef CONFIG_SYSCTL +static int watchdog_update_cpus(void) +{ + return 0; +} +#endif + static void set_sample_period(void) { } @@ -651,6 +686,8 @@ int lockup_detector_suspend(void) watchdog_enabled = 0; } + watchdog_nmi_reconfigure(); + mutex_unlock(&watchdog_proc_mutex); return ret; @@ -671,6 +708,8 @@ void lockup_detector_resume(void) if (watchdog_running && !watchdog_suspended) watchdog_unpark_threads(); + watchdog_nmi_reconfigure(); + mutex_unlock(&watchdog_proc_mutex); put_online_cpus(); } @@ -696,6 +735,8 @@ static int proc_watchdog_update(void) else watchdog_disable_all_cpus(); + watchdog_nmi_reconfigure(); + return err; } @@ -881,12 +922,11 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write, * a temporary cpumask, so we are likely not in a * position to do much else to make things better. */ -#ifdef CONFIG_SOFTLOCKUP_DETECTOR - if (smpboot_update_cpumask_percpu_thread( - &watchdog_threads, &watchdog_cpumask) != 0) + if (watchdog_update_cpus() != 0) pr_err("cpumask update failed\n"); -#endif } + + watchdog_nmi_reconfigure(); } out: mutex_unlock(&watchdog_proc_mutex); -- cgit v1.2.3-59-g8ed1b From 2104180a53698df5aec35aed5f840a26ade0551d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 12 Jul 2017 14:35:52 -0700 Subject: powerpc/64s: implement arch-specific hardlockup watchdog Implement an arch-specific watchdog rather than use the perf-based hardlockup detector. The new watchdog takes the soft-NMI directly, rather than going through perf. Perf interrupts are to be made maskable in future, so that would prevent the perf detector from working in those regions. Additionally, implement an SMP-based detector where all CPUs watch one another by pinging a shared cpumask. This is because powerpc Book3S does not have a true periodic local NMI, but some platforms do implement a true NMI IPI. If a CPU is stuck with interrupts hard disabled, the soft-NMI watchdog does not work, but the SMP watchdog will. Even on platforms without a true NMI IPI (so no good trace can be pulled from the stuck CPU), other CPUs will still notice the lockup, report it, and panic.
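[ed: the SMP cross-check described above reduces to a little bookkeeping per heartbeat. The following is a simplified model only; locking, the stuck-CPU mask and the NMI IPI path are omitted, and report_stuck_cpus() is a hypothetical stand-in. The real implementation follows in the diff.]
-------------------------------------------------------
static cpumask_t wd_pending;	/* CPUs that still owe a heartbeat this round */
static u64 wd_last_reset_tb;	/* timebase when the last round completed */
static u64 wd_smp_panic_timeout_tb;	/* panic threshold, in timebase ticks */

static void report_stuck_cpus(cpumask_t *mask);	/* hypothetical reporter */

static void heartbeat_sketch(int cpu)
{
	u64 tb = get_tb();

	cpumask_clear_cpu(cpu, &wd_pending);
	if (cpumask_empty(&wd_pending)) {
		/* every CPU checked in: note the time, start a new round */
		wd_last_reset_tb = tb;
		cpumask_copy(&wd_pending, cpu_online_mask);
	}

	/* no round completed for too long: the bits still set are stuck CPUs */
	if ((s64)(tb - wd_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
		report_stuck_cpus(&wd_pending);
}
-------------------------------------------------------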
[npiggin@gmail.com: honor watchdog disable at boot/hotplug] Link: http://lkml.kernel.org/r/20170621001346.5bb337c9@roar.ozlabs.ibm.com [npiggin@gmail.com: fix false positive warning at CPU unplug] Link: http://lkml.kernel.org/r/20170630080740.20766-1-npiggin@gmail.com [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/20170616065715.18390-6-npiggin@gmail.com Signed-off-by: Nicholas Piggin Reviewed-by: Don Zickus Tested-by: Babu Moger [sparc] Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/Kconfig | 7 +- arch/powerpc/include/asm/nmi.h | 11 + arch/powerpc/include/asm/smp.h | 2 + arch/powerpc/kernel/Makefile | 1 + arch/powerpc/kernel/exceptions-64s.S | 30 ++- arch/powerpc/kernel/kvm.c | 7 + arch/powerpc/kernel/setup_64.c | 19 -- arch/powerpc/kernel/smp.c | 20 +- arch/powerpc/kernel/watchdog.c | 386 +++++++++++++++++++++++++++++++++++ 9 files changed, 458 insertions(+), 25 deletions(-) create mode 100644 arch/powerpc/kernel/watchdog.c diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 63ed758e1d20..fce2f4f20891 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -82,7 +82,7 @@ config NR_IRQS config NMI_IPI bool - depends on SMP && (DEBUGGER || KEXEC_CORE) + depends on SMP && (DEBUGGER || KEXEC_CORE || HARDLOCKUP_DETECTOR) default y config STACKTRACE_SUPPORT @@ -192,12 +192,13 @@ config PPC select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC - select HAVE_NMI if PERF_EVENTS + select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S) + select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S) select HAVE_OPROFILE select HAVE_OPTPROBES if PPC64 select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI if PPC64 - select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI + select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if SMP diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index ff1ccb375e60..6f8e79cd35d8 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -1,4 +1,15 @@ #ifndef _ASM_NMI_H #define _ASM_NMI_H +#ifdef CONFIG_HARDLOCKUP_DETECTOR +extern void arch_touch_nmi_watchdog(void); + +extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace + +#else +static inline void arch_touch_nmi_watchdog(void) {} +#endif + #endif /* _ASM_NMI_H */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index ebddb2111d87..8ea98504f900 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -55,6 +55,8 @@ struct smp_ops_t { int (*cpu_bootable)(unsigned int nr); }; +extern void smp_flush_nmi_ipi(u64 delay_us); +extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us); extern void smp_send_debugger_break(void); extern void start_secondary_resume(void); extern void smp_generic_give_timebase(void); diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 0845eebc5af3..4aa7c147e447 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ signal_64.o ptrace32.o \ paca.o nvram_64.o firmware.o obj-$(CONFIG_VDSO32) += vdso32/ +obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog.o 
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 4c18a5fbb4bb..e6d8354d79ef 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1314,6 +1314,31 @@ EXC_REAL_NONE(0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) #endif +#if defined(CONFIG_HARDLOCKUP_DETECTOR) && defined(CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH) + +#define MASKED_DEC_HANDLER_LABEL 3f + +#define MASKED_DEC_HANDLER(_H) \ +3: /* soft-nmi */ \ + std r12,PACA_EXGEN+EX_R12(r13); \ + GET_SCRATCH0(r10); \ + std r10,PACA_EXGEN+EX_R13(r13); \ + EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H) + +EXC_COMMON_BEGIN(soft_nmi_common) + mr r10,r1 + ld r1,PACAEMERGSP(r13) + ld r1,PACA_NMI_EMERG_SP(r13) + subi r1,r1,INT_FRAME_SIZE + EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900, + system_reset, soft_nmi_interrupt, + ADD_NVGPRS;ADD_RECONCILE) + b ret_from_except + +#else +#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */ +#define MASKED_DEC_HANDLER(_H) +#endif /* * An interrupt came in while soft-disabled. We set paca->irq_happened, then: @@ -1336,7 +1361,7 @@ masked_##_H##interrupt: \ lis r10,0x7fff; \ ori r10,r10,0xffff; \ mtspr SPRN_DEC,r10; \ - b 2f; \ + b MASKED_DEC_HANDLER_LABEL; \ 1: cmpwi r10,PACA_IRQ_DBELL; \ beq 2f; \ cmpwi r10,PACA_IRQ_HMI; \ @@ -1351,7 +1376,8 @@ masked_##_H##interrupt: \ ld r11,PACA_EXGEN+EX_R11(r13); \ GET_SCRATCH0(r13); \ ##_H##rfid; \ - b . + b .; \ + MASKED_DEC_HANDLER(_H) /* * Real mode exceptions actually use this too, but alternate diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 9ad37f827a97..1086ea37c832 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -25,6 +25,7 @@ #include #include #include +#include /* hardlockup_detector_disable() */ #include #include @@ -718,6 +719,12 @@ static __init void kvm_free_tmp(void) static int __init kvm_guest_init(void) { + /* + * The hardlockup detector is likely to get false positives in + * KVM guests, so disable it by default. + */ + hardlockup_detector_disable(); + if (!kvm_para_available()) goto free_tmp; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 074a075a9cdb..af23d4b576ec 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -751,22 +751,3 @@ unsigned long memory_block_size_bytes(void) struct ppc_pci_io ppc_pci_io; EXPORT_SYMBOL(ppc_pci_io); #endif - -#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF -u64 hw_nmi_get_sample_period(int watchdog_thresh) -{ - return ppc_proc_freq * watchdog_thresh; -} - -/* - * The hardlockup detector breaks PMU event based branches and is likely - * to get false positives in KVM guests, so disable it by default. 
- */ -static int __init disable_hardlockup_detector(void) -{ - hardlockup_detector_disable(); - - return 0; -} -early_initcall(disable_hardlockup_detector); -#endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index c6b8bace1766..997c88d54acf 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -435,13 +435,31 @@ static void do_smp_send_nmi_ipi(int cpu) } } +void smp_flush_nmi_ipi(u64 delay_us) +{ + unsigned long flags; + + nmi_ipi_lock_start(&flags); + while (nmi_ipi_busy_count) { + nmi_ipi_unlock_end(&flags); + udelay(1); + if (delay_us) { + delay_us--; + if (!delay_us) + return; + } + nmi_ipi_lock_start(&flags); + } + nmi_ipi_unlock_end(&flags); +} + /* * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS. * - fn is the target callback function. * - delay_us > 0 is the delay before giving up waiting for targets to * enter the handler, == 0 specifies indefinite delay. */ -static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) +int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) { unsigned long flags; int me = raw_smp_processor_id(); diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c new file mode 100644 index 000000000000..b67f8b03a32d --- /dev/null +++ b/arch/powerpc/kernel/watchdog.c @@ -0,0 +1,386 @@ +/* + * Watchdog support on powerpc systems. + * + * Copyright 2017, IBM Corporation. + * + * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * The watchdog has a simple timer that runs on each CPU, once per timer + * period. This is the heartbeat. + * + * Then there are checks to see if the heartbeat has not triggered on a CPU + * for the panic timeout period. Currently the watchdog only supports an + * SMP check, so the heartbeat only turns on when we have 2 or more CPUs. + * + * This is not an NMI watchdog, but Linux uses that name for a generic + * watchdog in some cases, so NMI gets used in some places. + */ + +static cpumask_t wd_cpus_enabled __read_mostly; + +static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */ +static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */ + +static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */ + +static DEFINE_PER_CPU(struct timer_list, wd_timer); +static DEFINE_PER_CPU(u64, wd_timer_tb); + +/* + * These are for the SMP checker. CPUs clear their pending bit in their + * heartbeat. If the bitmask becomes empty, the time is noted and the + * bitmask is refilled. + * + * All CPUs clear their bit in the pending mask every timer period. + * Once all have cleared, the time is noted and the bits are reset. + * If the time since all clear was greater than the panic timeout, + * we can panic with the list of stuck CPUs. + * + * This will work best with NMI IPIs for crash code so the stuck CPUs + * can be pulled out to get their backtraces. + */ +static unsigned long __wd_smp_lock; +static cpumask_t wd_smp_cpus_pending; +static cpumask_t wd_smp_cpus_stuck; +static u64 wd_smp_last_reset_tb; + +static inline void wd_smp_lock(unsigned long *flags) +{ + /* + * Avoid locking layers if possible. + * This may be called from low level interrupt handlers at some + * point in future. 
+ */ + local_irq_save(*flags); + while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) + cpu_relax(); +} + +static inline void wd_smp_unlock(unsigned long *flags) +{ + clear_bit_unlock(0, &__wd_smp_lock); + local_irq_restore(*flags); +} + +static void wd_lockup_ipi(struct pt_regs *regs) +{ + pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id()); + print_modules(); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); +} + +static void set_cpu_stuck(int cpu, u64 tb) +{ + cpumask_set_cpu(cpu, &wd_smp_cpus_stuck); + cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); + if (cpumask_empty(&wd_smp_cpus_pending)) { + wd_smp_last_reset_tb = tb; + cpumask_andnot(&wd_smp_cpus_pending, + &wd_cpus_enabled, + &wd_smp_cpus_stuck); + } +} + +static void watchdog_smp_panic(int cpu, u64 tb) +{ + unsigned long flags; + int c; + + wd_smp_lock(&flags); + /* Double check some things under lock */ + if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb) + goto out; + if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) + goto out; + if (cpumask_weight(&wd_smp_cpus_pending) == 0) + goto out; + + pr_emerg("Watchdog CPU:%d detected Hard LOCKUP other CPUS:%*pbl\n", + cpu, cpumask_pr_args(&wd_smp_cpus_pending)); + + /* + * Try to trigger the stuck CPUs. + */ + for_each_cpu(c, &wd_smp_cpus_pending) { + if (c == cpu) + continue; + smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000); + } + smp_flush_nmi_ipi(1000000); + + /* Take the stuck CPU out of the watch group */ + for_each_cpu(c, &wd_smp_cpus_pending) + set_cpu_stuck(c, tb); + +out: + wd_smp_unlock(&flags); + + printk_safe_flush(); + /* + * printk_safe_flush() seems to require another print + * before anything actually goes out to console. 
+ */ + if (sysctl_hardlockup_all_cpu_backtrace) + trigger_allbutself_cpu_backtrace(); + + if (hardlockup_panic) + nmi_panic(NULL, "Hard LOCKUP"); +} + +static void wd_smp_clear_cpu_pending(int cpu, u64 tb) +{ + if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) { + if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) { + unsigned long flags; + + pr_emerg("Watchdog CPU:%d became unstuck\n", cpu); + wd_smp_lock(&flags); + cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck); + wd_smp_unlock(&flags); + } + return; + } + cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); + if (cpumask_empty(&wd_smp_cpus_pending)) { + unsigned long flags; + + wd_smp_lock(&flags); + if (cpumask_empty(&wd_smp_cpus_pending)) { + wd_smp_last_reset_tb = tb; + cpumask_andnot(&wd_smp_cpus_pending, + &wd_cpus_enabled, + &wd_smp_cpus_stuck); + } + wd_smp_unlock(&flags); + } +} + +static void watchdog_timer_interrupt(int cpu) +{ + u64 tb = get_tb(); + + per_cpu(wd_timer_tb, cpu) = tb; + + wd_smp_clear_cpu_pending(cpu, tb); + + if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb) + watchdog_smp_panic(cpu, tb); +} + +void soft_nmi_interrupt(struct pt_regs *regs) +{ + unsigned long flags; + int cpu = raw_smp_processor_id(); + u64 tb; + + if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) + return; + + nmi_enter(); + tb = get_tb(); + if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) { + per_cpu(wd_timer_tb, cpu) = tb; + + wd_smp_lock(&flags); + if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) { + wd_smp_unlock(&flags); + goto out; + } + set_cpu_stuck(cpu, tb); + + pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", cpu); + print_modules(); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + + wd_smp_unlock(&flags); + + if (sysctl_hardlockup_all_cpu_backtrace) + trigger_allbutself_cpu_backtrace(); + + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); + } + if (wd_panic_timeout_tb < 0x7fffffff) + mtspr(SPRN_DEC, wd_panic_timeout_tb); + +out: + nmi_exit(); +} + +static void wd_timer_reset(unsigned int cpu, struct timer_list *t) +{ + t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms); + if (wd_timer_period_ms > 1000) + t->expires = __round_jiffies_up(t->expires, cpu); + add_timer_on(t, cpu); +} + +static void wd_timer_fn(unsigned long data) +{ + struct timer_list *t = this_cpu_ptr(&wd_timer); + int cpu = smp_processor_id(); + + watchdog_timer_interrupt(cpu); + + wd_timer_reset(cpu, t); +} + +void arch_touch_nmi_watchdog(void) +{ + int cpu = smp_processor_id(); + + watchdog_timer_interrupt(cpu); +} +EXPORT_SYMBOL(arch_touch_nmi_watchdog); + +static void start_watchdog_timer_on(unsigned int cpu) +{ + struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); + + per_cpu(wd_timer_tb, cpu) = get_tb(); + + setup_pinned_timer(t, wd_timer_fn, 0); + wd_timer_reset(cpu, t); +} + +static void stop_watchdog_timer_on(unsigned int cpu) +{ + struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); + + del_timer_sync(t); +} + +static int start_wd_on_cpu(unsigned int cpu) +{ + if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { + WARN_ON(1); + return 0; + } + + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + return 0; + + if (watchdog_suspended) + return 0; + + if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) + return 0; + + cpumask_set_cpu(cpu, &wd_cpus_enabled); + if (cpumask_weight(&wd_cpus_enabled) == 1) { + cpumask_set_cpu(cpu, &wd_smp_cpus_pending); + wd_smp_last_reset_tb = get_tb(); + } + smp_wmb(); + start_watchdog_timer_on(cpu); + + return 0; +} + +static int stop_wd_on_cpu(unsigned int cpu) +{ + if 
(!cpumask_test_cpu(cpu, &wd_cpus_enabled)) + return 0; /* Can happen in CPU unplug case */ + + stop_watchdog_timer_on(cpu); + + cpumask_clear_cpu(cpu, &wd_cpus_enabled); + wd_smp_clear_cpu_pending(cpu, get_tb()); + + return 0; +} + +static void watchdog_calc_timeouts(void) +{ + wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq; + + /* Have the SMP detector trigger a bit later */ + wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2; + + /* 2/5 is the factor that the perf based detector uses */ + wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; +} + +void watchdog_nmi_reconfigure(void) +{ + int cpu; + + watchdog_calc_timeouts(); + + for_each_cpu(cpu, &wd_cpus_enabled) + stop_wd_on_cpu(cpu); + + for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) + start_wd_on_cpu(cpu); +} + +/* + * This runs after lockup_detector_init() which sets up watchdog_cpumask. + */ +static int __init powerpc_watchdog_init(void) +{ + int err; + + watchdog_calc_timeouts(); + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", + start_wd_on_cpu, stop_wd_on_cpu); + if (err < 0) + pr_warn("Watchdog could not be initialized"); + + return 0; +} +arch_initcall(powerpc_watchdog_init); + +static void handle_backtrace_ipi(struct pt_regs *regs) +{ + nmi_cpu_backtrace(regs); +} + +static void raise_backtrace_ipi(cpumask_t *mask) +{ + unsigned int cpu; + + for_each_cpu(cpu, mask) { + if (cpu == smp_processor_id()) + handle_backtrace_ipi(NULL); + else + smp_send_nmi_ipi(cpu, handle_backtrace_ipi, 1000000); + } +} + +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +{ + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi); +} -- cgit v1.2.3-59-g8ed1b From 3e2c044a54e6b6373606f8ffad42a4a0759fcf3d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:55 -0700 Subject: efi: avoid fortify checks in EFI stub This avoids CONFIG_FORTIFY_SOURCE from being enabled during the EFI stub build, as adding a panic() implementation may not work well. This can be adjusted in the future. Link: http://lkml.kernel.org/r/1497903987-21002-2-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Suggested-by: Daniel Micay Reviewed-by: Ard Biesheuvel Acked-by: Mark Rutland Cc: Matt Fleming Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/firmware/efi/libstub/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index f7425960f6a5..37e24f525162 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -17,6 +17,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ + -D__NO_FORTIFY \ $(call cc-option,-ffreestanding) \ $(call cc-option,-fno-stack-protector) -- cgit v1.2.3-59-g8ed1b From e2ae8ab4b571e2e4094a28acb60649bc2732c67f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:35:58 -0700 Subject: kexec_file: adjust declaration of kexec_purgatory Defining kexec_purgatory as a zero-length char array upsets compile time size checking. Since this is built on a per-arch basis, define it as an unsized char array (like is done for other similar things, e.g. linker sections). This silences the warning generated by the future CONFIG_FORTIFY_SOURCE, which did not like the memcmp() of a "0 byte" array. 
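As a standalone sketch (not part of the patch; the names are stand-ins for kexec_purgatory), the sizes the fortify checks believe in can be printed with __builtin_object_size():
-------------------------------------------------------
/* gcc -O2 sketch.c && ./a.out */
#include <stdio.h>

char zero_sized[0];	/* old style: the compiler knows a size of 0 bytes */
extern char unsized[];	/* new style: size unknown at compile time */

int main(void)
{
	/* prints 0: a fortified read of even one byte is an overflow */
	printf("%zu\n", __builtin_object_size(zero_sized, 0));
	/* prints the value of (size_t)-1: size unknown, check skipped */
	printf("%zu\n", __builtin_object_size(unsized, 0));
	return 0;
}
-------------------------------------------------------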
This drops the __weak and uses an extern instead, since both users define kexec_purgatory. Link: http://lkml.kernel.org/r/1497903987-21002-4-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Acked-by: "Eric W. Biederman" Cc: Daniel Micay Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec_file.c | 7 ------- kernel/kexec_internal.h | 2 ++ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index c8f7f77e9fa9..9f48f4412297 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -26,13 +26,6 @@ #include #include "kexec_internal.h" -/* - * Declare these symbols weak so that if architecture provides a purgatory, - * these will be overridden. - */ -char __weak kexec_purgatory[0]; -size_t __weak kexec_purgatory_size = 0; - static int kexec_calculate_store_digests(struct kimage *image); /* Architectures can provide this probe function */ diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h index 799a8a452187..50dfcb039a41 100644 --- a/kernel/kexec_internal.h +++ b/kernel/kexec_internal.h @@ -17,6 +17,8 @@ extern struct mutex kexec_mutex; #ifdef CONFIG_KEXEC_FILE #include void kimage_file_post_load_cleanup(struct kimage *image); +extern char kexec_purgatory[]; +extern size_t kexec_purgatory_size; #else /* CONFIG_KEXEC_FILE */ static inline void kimage_file_post_load_cleanup(struct kimage *image) { } #endif /* CONFIG_KEXEC_FILE */ -- cgit v1.2.3-59-g8ed1b From 4c93496f18ce5044d78e4f7f9e018682a4f44b3d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:36:01 -0700 Subject: IB/rxe: do not copy extra stack memory to skb This fixes an over-read condition detected by FORTIFY_SOURCE for this line: memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb)); The error was: In file included from ./include/linux/bitmap.h:8:0, from ./include/linux/cpumask.h:11, from ./include/linux/mm_types_task.h:13, from ./include/linux/mm_types.h:4, from ./include/linux/kmemcheck.h:4, from ./include/linux/skbuff.h:18, from drivers/infiniband/sw/rxe/rxe_resp.c:34: In function 'memcpy', inlined from 'send_atomic_ack.constprop' at drivers/infiniband/sw/rxe/rxe_resp.c:998:2, inlined from 'acknowledge' at drivers/infiniband/sw/rxe/rxe_resp.c:1026:3, inlined from 'rxe_responder' at drivers/infiniband/sw/rxe/rxe_resp.c:1286:10: ./include/linux/string.h:309:4: error: call to '__read_overflow2' declared with attribute error: detected read beyond size of object passed as 2nd parameter __read_overflow2(); Daniel Micay noted that struct rxe_pkt_info is 32 bytes on 32-bit architectures, but skb->cb is still 64. The memcpy() over-reads 32 bytes. This fixes it by zeroing the unused bytes in skb->cb.
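The copy-then-zero pattern, as a simplified sketch (the struct and sizes here are illustrative, not the real rxe definitions):
-------------------------------------------------------
#include <string.h>

struct pkt_info { int fields[8]; };	/* 32 bytes on common 32-bit ABIs */

void store_pkt(unsigned char cb[64], const struct pkt_info *src)
{
	/* read only the bytes that exist in the source object... */
	memcpy(cb, src, sizeof(*src));
	/* ...and zero the rest of the 64-byte control block */
	memset(cb + sizeof(*src), 0, 64 - sizeof(*src));
}
-------------------------------------------------------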
Link: http://lkml.kernel.org/r/1497903987-21002-5-git-send-email-keescook@chromium.org Signed-off-by: Kees Cook Cc: Moni Shoua Cc: Doug Ledford Cc: Sean Hefty Cc: Daniel Micay Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/infiniband/sw/rxe/rxe_resp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 23039768f541..be944d5aa9af 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -995,7 +995,9 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, free_rd_atomic_resource(qp, res); rxe_advance_resp_resource(qp); - memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb)); + memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt)); + memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0, + sizeof(skb->cb) - sizeof(ack_pkt)); res->type = RXE_ATOMIC_MASK; res->atomic.skb = skb; -- cgit v1.2.3-59-g8ed1b From 054f367a32381b5640c5d150fe0b7ba285564998 Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Wed, 12 Jul 2017 14:36:04 -0700 Subject: powerpc: don't fortify prom_init prom_init is a bit special; in theory it should be able to be linked separately to the kernel. To keep this from getting too complex, the symbols that prom_init.c uses are checked. Fortification adds symbols, and it gets quite messy as it includes things like panic(). So just don't fortify prom_init.c for now. Link: http://lkml.kernel.org/r/1497903987-21002-6-git-send-email-keescook@chromium.org Signed-off-by: Daniel Axtens Signed-off-by: Kees Cook Acked-by: Michael Ellerman Cc: Daniel Micay Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/prom_init.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index dd8a04f3053a..613f79f03877 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -15,6 +15,9 @@ #undef DEBUG_PROM +/* we cannot use FORTIFY as it brings in new symbols */ +#define __NO_FORTIFY + #include #include #include -- cgit v1.2.3-59-g8ed1b From c69a48cdb301a18697bc8c9935baf4f32861cf9e Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Wed, 12 Jul 2017 14:36:07 -0700 Subject: powerpc: make feature-fixup tests fortify-safe Testing the fortified string functions[1] would cause a kernel panic on boot in test_feature_fixups() due to a buffer overflow in memcmp. This boils down to things like this: extern unsigned int ftr_fixup_test1; extern unsigned int ftr_fixup_test1_orig; check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); We know that these are asm labels so it is safe to read up to 'size' bytes at those addresses. However, because we have passed the address of a single unsigned int to memcmp, the compiler believes the underlying object is in fact a single unsigned int. So if size > sizeof(unsigned int), there will be a panic at runtime. We can fix this by changing the types: instead of calling the asm labels unsigned ints, call them unsigned int[]s. Therefore the size isn't incorrectly determined at compile time and we get a regular unsafe memcmp and no panic. 
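The two shapes, as a compile-only sketch (the labels are only declared here, mirroring how the kernel sees the asm labels; the names are illustrative):
-------------------------------------------------------
#include <string.h>

extern unsigned int old_style;		/* believed to be a 4-byte object */
extern unsigned int new_style[];	/* size unknown to the compiler */

int compare_old(const void *ref, size_t size)
{
	/* fortified memcmp(): __builtin_object_size(&old_style, 0) is 4,
	 * so any size > 4 trips the read-overflow check */
	return memcmp(&old_style, ref, size);
}

int compare_new(const void *ref, size_t size)
{
	/* __builtin_object_size(new_style, 0) is (size_t)-1, so the check
	 * is skipped and the intended raw comparison remains */
	return memcmp(new_style, ref, size);
}
-------------------------------------------------------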
[1] http://openwall.com/lists/kernel-hardening/2017/05/09/2 Link: http://lkml.kernel.org/r/1497903987-21002-7-git-send-email-keescook@chromium.org Signed-off-by: Daniel Axtens Signed-off-by: Kees Cook Suggested-by: Michael Ellerman Tested-by: Andrew Donnellan Reviewed-by: Andrew Donnellan Cc: Kees Cook Cc: Daniel Micay Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/lib/feature-fixups.c | 180 +++++++++++++++++++------------------- 1 file changed, 90 insertions(+), 90 deletions(-) diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index f3917705c686..41cf5ae273cf 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -233,192 +233,192 @@ static long calc_offset(struct fixup_entry *entry, unsigned int *p) static void test_basic_patching(void) { - extern unsigned int ftr_fixup_test1; - extern unsigned int end_ftr_fixup_test1; - extern unsigned int ftr_fixup_test1_orig; - extern unsigned int ftr_fixup_test1_expected; - int size = &end_ftr_fixup_test1 - &ftr_fixup_test1; + extern unsigned int ftr_fixup_test1[]; + extern unsigned int end_ftr_fixup_test1[]; + extern unsigned int ftr_fixup_test1_orig[]; + extern unsigned int ftr_fixup_test1_expected[]; + int size = end_ftr_fixup_test1 - ftr_fixup_test1; fixup.value = fixup.mask = 8; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2); fixup.alt_start_off = fixup.alt_end_off = 0; /* Sanity check */ - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(8, &fixup); - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); /* Check we do patch if the mask doesn't match */ - memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size); - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); patch_feature_section(~8, &fixup); - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); } static void test_alternative_patching(void) { - extern unsigned int ftr_fixup_test2; - extern unsigned int end_ftr_fixup_test2; - extern unsigned int ftr_fixup_test2_orig; - extern unsigned int ftr_fixup_test2_alt; - extern unsigned int ftr_fixup_test2_expected; - int size = &end_ftr_fixup_test2 - &ftr_fixup_test2; + extern unsigned int ftr_fixup_test2[]; + extern unsigned int end_ftr_fixup_test2[]; + extern unsigned int ftr_fixup_test2_orig[]; + extern unsigned int ftr_fixup_test2_alt[]; + extern unsigned int ftr_fixup_test2_expected[]; + int size = end_ftr_fixup_test2 - ftr_fixup_test2; fixup.value = fixup.mask = 0xF; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2); - fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt); - fixup.alt_end_off = 
calc_offset(&fixup, &ftr_fixup_test2_alt + 1); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt); + fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1); /* Sanity check */ - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(0xF, &fixup); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); /* Check we do patch if the mask doesn't match */ - memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); patch_feature_section(~0xF, &fixup); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); } static void test_alternative_case_too_big(void) { - extern unsigned int ftr_fixup_test3; - extern unsigned int end_ftr_fixup_test3; - extern unsigned int ftr_fixup_test3_orig; - extern unsigned int ftr_fixup_test3_alt; - int size = &end_ftr_fixup_test3 - &ftr_fixup_test3; + extern unsigned int ftr_fixup_test3[]; + extern unsigned int end_ftr_fixup_test3[]; + extern unsigned int ftr_fixup_test3_orig[]; + extern unsigned int ftr_fixup_test3_alt[]; + int size = end_ftr_fixup_test3 - ftr_fixup_test3; fixup.value = fixup.mask = 0xC; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2); - fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt); - fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt); + fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2); /* Sanity check */ - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); /* Expect nothing to be patched, and the error returned to us */ check(patch_feature_section(0xF, &fixup) == 1); - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); check(patch_feature_section(0, &fixup) == 1); - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); check(patch_feature_section(~0xF, &fixup) == 1); - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); } static void test_alternative_case_too_small(void) { - extern unsigned int ftr_fixup_test4; - extern unsigned int end_ftr_fixup_test4; - extern unsigned int ftr_fixup_test4_orig; - extern unsigned int ftr_fixup_test4_alt; - extern unsigned int ftr_fixup_test4_expected; - int size = &end_ftr_fixup_test4 - &ftr_fixup_test4; + extern unsigned 
int ftr_fixup_test4[]; + extern unsigned int end_ftr_fixup_test4[]; + extern unsigned int ftr_fixup_test4_orig[]; + extern unsigned int ftr_fixup_test4_alt[]; + extern unsigned int ftr_fixup_test4_expected[]; + int size = end_ftr_fixup_test4 - ftr_fixup_test4; unsigned long flag; /* Check a high-bit flag */ flag = 1UL << ((sizeof(unsigned long) - 1) * 8); fixup.value = fixup.mask = flag; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5); - fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt); - fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt); + fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2); /* Sanity check */ - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(flag, &fixup); - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); /* Check we do patch if the mask doesn't match */ - memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size); - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); patch_feature_section(~flag, &fixup); - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); } static void test_alternative_case_with_branch(void) { - extern unsigned int ftr_fixup_test5; - extern unsigned int end_ftr_fixup_test5; - extern unsigned int ftr_fixup_test5_expected; - int size = &end_ftr_fixup_test5 - &ftr_fixup_test5; + extern unsigned int ftr_fixup_test5[]; + extern unsigned int end_ftr_fixup_test5[]; + extern unsigned int ftr_fixup_test5_expected[]; + int size = end_ftr_fixup_test5 - ftr_fixup_test5; - check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0); + check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0); } static void test_alternative_case_with_external_branch(void) { - extern unsigned int ftr_fixup_test6; - extern unsigned int end_ftr_fixup_test6; - extern unsigned int ftr_fixup_test6_expected; - int size = &end_ftr_fixup_test6 - &ftr_fixup_test6; + extern unsigned int ftr_fixup_test6[]; + extern unsigned int end_ftr_fixup_test6[]; + extern unsigned int ftr_fixup_test6_expected[]; + int size = end_ftr_fixup_test6 - ftr_fixup_test6; - check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0); + check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0); } static void test_cpu_macros(void) { - extern u8 ftr_fixup_test_FTR_macros; - extern u8 ftr_fixup_test_FTR_macros_expected; - unsigned long size = &ftr_fixup_test_FTR_macros_expected - - &ftr_fixup_test_FTR_macros; + extern u8 ftr_fixup_test_FTR_macros[]; + extern u8 ftr_fixup_test_FTR_macros_expected[]; + unsigned long size = ftr_fixup_test_FTR_macros_expected - + ftr_fixup_test_FTR_macros; /* The fixups 
have already been done for us during boot */ - check(memcmp(&ftr_fixup_test_FTR_macros, - &ftr_fixup_test_FTR_macros_expected, size) == 0); + check(memcmp(ftr_fixup_test_FTR_macros, + ftr_fixup_test_FTR_macros_expected, size) == 0); } static void test_fw_macros(void) { #ifdef CONFIG_PPC64 - extern u8 ftr_fixup_test_FW_FTR_macros; - extern u8 ftr_fixup_test_FW_FTR_macros_expected; - unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected - - &ftr_fixup_test_FW_FTR_macros; + extern u8 ftr_fixup_test_FW_FTR_macros[]; + extern u8 ftr_fixup_test_FW_FTR_macros_expected[]; + unsigned long size = ftr_fixup_test_FW_FTR_macros_expected - + ftr_fixup_test_FW_FTR_macros; /* The fixups have already been done for us during boot */ - check(memcmp(&ftr_fixup_test_FW_FTR_macros, - &ftr_fixup_test_FW_FTR_macros_expected, size) == 0); + check(memcmp(ftr_fixup_test_FW_FTR_macros, + ftr_fixup_test_FW_FTR_macros_expected, size) == 0); #endif } static void test_lwsync_macros(void) { - extern u8 lwsync_fixup_test; - extern u8 end_lwsync_fixup_test; - extern u8 lwsync_fixup_test_expected_LWSYNC; - extern u8 lwsync_fixup_test_expected_SYNC; - unsigned long size = &end_lwsync_fixup_test - - &lwsync_fixup_test; + extern u8 lwsync_fixup_test[]; + extern u8 end_lwsync_fixup_test[]; + extern u8 lwsync_fixup_test_expected_LWSYNC[]; + extern u8 lwsync_fixup_test_expected_SYNC[]; + unsigned long size = end_lwsync_fixup_test - + lwsync_fixup_test; /* The fixups have already been done for us during boot */ if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) { - check(memcmp(&lwsync_fixup_test, - &lwsync_fixup_test_expected_LWSYNC, size) == 0); + check(memcmp(lwsync_fixup_test, + lwsync_fixup_test_expected_LWSYNC, size) == 0); } else { - check(memcmp(&lwsync_fixup_test, - &lwsync_fixup_test_expected_SYNC, size) == 0); + check(memcmp(lwsync_fixup_test, + lwsync_fixup_test_expected_SYNC, size) == 0); } } -- cgit v1.2.3-59-g8ed1b From 6974f0c4555e285ab217cee58b6e874f776ff409 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Wed, 12 Jul 2017 14:36:10 -0700 Subject: include/linux/string.h: add the option of fortified string.h functions This adds support for compiling with a rough equivalent to the glibc _FORTIFY_SOURCE=1 feature, providing compile-time and runtime buffer overflow checks for string.h functions when the compiler determines the size of the source or destination buffer at compile-time. Unlike glibc, it covers buffer reads in addition to writes. GNU C __builtin_*_chk intrinsics are avoided because they would force a much more complex implementation. They aren't designed to detect read overflows and offer no real benefit when using an implementation based on inline checks. Inline checks don't add up to much code size and allow full use of the regular string intrinsics while avoiding the need for a bunch of _chk functions and per-arch assembly to avoid wrapper overhead. This detects various overflows at compile-time in various drivers and some non-x86 core kernel code. There will likely be issues caught in regular use at runtime too. Future improvements left out of initial implementation for simplicity, as it's all quite optional and can be done incrementally: * Some of the fortified string functions (strncpy, strcat), don't yet place a limit on reads from the source based on __builtin_object_size of the source buffer. * Extending coverage to more string functions like strlcat. 
* It should be possible to optionally use __builtin_object_size(x, 1) for some functions (C strings) to detect intra-object overflows (like glibc's _FORTIFY_SOURCE=2), but for now this takes the conservative approach to avoid likely compatibility issues. * The compile-time checks should be made available via a separate config option which can be enabled by default (or always enabled) once enough time has passed to get the issues it catches fixed. Kees said: "This is great to have. While it was out-of-tree code, it would have blocked at least CVE-2016-3858 from being exploitable (improper size argument to strlcpy()). I've sent a number of fixes for out-of-bounds-reads that this detected upstream already" [arnd@arndb.de: x86: fix fortified memcpy] Link: http://lkml.kernel.org/r/20170627150047.660360-1-arnd@arndb.de [keescook@chromium.org: avoid panic() in favor of BUG()] Link: http://lkml.kernel.org/r/20170626235122.GA25261@beast [keescook@chromium.org: move from -mm, add ARCH_HAS_FORTIFY_SOURCE, tweak Kconfig help] Link: http://lkml.kernel.org/r/20170526095404.20439-1-danielmicay@gmail.com Link: http://lkml.kernel.org/r/1497903987-21002-8-git-send-email-keescook@chromium.org Signed-off-by: Daniel Micay Signed-off-by: Kees Cook Signed-off-by: Arnd Bergmann Acked-by: Kees Cook Cc: Mark Rutland Cc: Daniel Axtens Cc: Rasmus Villemoes Cc: Andy Shevchenko Cc: Chris Metcalf Cc: Thomas Gleixner Cc: "H. Peter Anvin" Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/Kconfig | 6 ++ arch/arm64/Kconfig | 1 + arch/arm64/include/asm/string.h | 5 + arch/powerpc/Kconfig | 1 + arch/x86/Kconfig | 1 + arch/x86/boot/compressed/misc.c | 5 + arch/x86/include/asm/string_32.h | 9 ++ arch/x86/include/asm/string_64.h | 7 ++ arch/x86/lib/memcpy_32.c | 2 +- include/linux/string.h | 200 +++++++++++++++++++++++++++++++++++++++ lib/string.c | 7 ++ security/Kconfig | 7 ++ 12 files changed, 250 insertions(+), 1 deletion(-) diff --git a/arch/Kconfig b/arch/Kconfig index fb9bd7d36b05..21d0089117fe 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -223,6 +223,12 @@ config GENERIC_SMP_IDLE_THREAD config GENERIC_IDLE_POLL_SETUP bool +config ARCH_HAS_FORTIFY_SOURCE + bool + help + An architecture should select this when it can successfully + build and run with CONFIG_FORTIFY_SOURCE. + # Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h config ARCH_HAS_SET_MEMORY bool diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 8addb851ab5e..dfd908630631 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -12,6 +12,7 @@ config ARM64 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA select ARCH_HAS_KCOV diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index 2eb714c4639f..d0aa42907569 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -63,6 +63,11 @@ extern int memcmp(const void *, const void *, size_t); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. 
*/ +#endif + #endif #endif diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index fce2f4f20891..36f858c37ca7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -125,6 +125,7 @@ config PPC select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_SET_COHERENT_MASK select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE select ARCH_HAS_SG_CHAIN diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3d2b8ce54e00..781521b7cf9e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -50,6 +50,7 @@ config X86 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_MMIO_FLUSH diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 00241c815524..a0838ab929f2 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -411,3 +411,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, debug_putstr("done.\nBooting the kernel.\n"); return output; } + +void fortify_panic(const char *name) +{ + error("detected buffer overflow"); +} diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 3d3e8353ee5c..e9ee84873de5 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -142,7 +142,9 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, } #define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, size_t); +#ifndef CONFIG_FORTIFY_SOURCE #ifdef CONFIG_X86_USE_3DNOW #include @@ -195,11 +197,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) #endif #endif +#endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMMOVE void *memmove(void *dest, const void *src, size_t n); +extern int memcmp(const void *, const void *, size_t); +#ifndef CONFIG_FORTIFY_SOURCE #define memcmp __builtin_memcmp +#endif #define __HAVE_ARCH_MEMCHR extern void *memchr(const void *cs, int c, size_t count); @@ -321,6 +327,8 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, : __memset_generic((s), (c), (count))) #define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, size_t); +#ifndef CONFIG_FORTIFY_SOURCE #if (__GNUC__ >= 4) #define memset(s, c, count) __builtin_memset(s, c, count) #else @@ -330,6 +338,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, (count)) \ : __memset((s), (c), (count))) #endif +#endif /* !CONFIG_FORTIFY_SOURCE */ /* * find the first occurrence of byte 'c', or 1 past the area if none diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 1f22bc277c45..2a8c822de1fc 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -31,6 +31,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); +#ifndef CONFIG_FORTIFY_SOURCE #ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 #define memcpy(dst, src, len) \ @@ -51,6 +52,7 @@ extern void *__memcpy(void *to, const void *from, size_t len); */ #define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len)) #endif +#endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); @@ -77,6 
+79,11 @@ int strcmp(const char *cs, const char *ct); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + #endif #define __HAVE_ARCH_MEMCPY_MCSAFE 1 diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index cad12634d6bd..2eab7d0bfedd 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c @@ -6,7 +6,7 @@ __visible void *memcpy(void *to, const void *from, size_t n) { -#ifdef CONFIG_X86_USE_3DNOW +#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE) return __memcpy3d(to, from, n); #else return __memcpy(to, from, n); diff --git a/include/linux/string.h b/include/linux/string.h index 7439d83eaa33..96f5a5fd0377 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -193,4 +193,204 @@ static inline const char *kbasename(const char *path) return tail ? tail + 1 : path; } +#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline)) +#define __RENAME(x) __asm__(#x) + +void fortify_panic(const char *name) __noreturn __cold; +void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter"); +void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter"); +void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); + +#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) +__FORTIFY_INLINE char *strcpy(char *p, const char *q) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __builtin_strcpy(p, q); + if (strscpy(p, q, p_size < q_size ? p_size : q_size) < 0) + fortify_panic(__func__); + return p; +} + +__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __builtin_strncpy(p, q, size); +} + +__FORTIFY_INLINE char *strcat(char *p, const char *q) +{ + size_t p_size = __builtin_object_size(p, 0); + if (p_size == (size_t)-1) + return __builtin_strcat(p, q); + if (strlcat(p, q, p_size) >= p_size) + fortify_panic(__func__); + return p; +} + +__FORTIFY_INLINE __kernel_size_t strlen(const char *p) +{ + __kernel_size_t ret; + size_t p_size = __builtin_object_size(p, 0); + if (p_size == (size_t)-1) + return __builtin_strlen(p); + ret = strnlen(p, p_size); + if (p_size <= ret) + fortify_panic(__func__); + return ret; +} + +extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); +__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen) +{ + size_t p_size = __builtin_object_size(p, 0); + __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? 
maxlen : p_size); + if (p_size <= ret && maxlen != ret) + fortify_panic(__func__); + return ret; +} + +/* defined after fortified strlen to reuse it */ +extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); +__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) +{ + size_t ret; + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __real_strlcpy(p, q, size); + ret = strlen(q); + if (size) { + size_t len = (ret >= size) ? size - 1 : ret; + if (__builtin_constant_p(len) && len >= p_size) + __write_overflow(); + if (len >= p_size) + fortify_panic(__func__); + __builtin_memcpy(p, q, len); + p[len] = '\0'; + } + return ret; +} + +/* defined after fortified strlen and strnlen to reuse them */ +__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) +{ + size_t p_len, copy_len; + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (p_size == (size_t)-1 && q_size == (size_t)-1) + return __builtin_strncat(p, q, count); + p_len = strlen(p); + copy_len = strnlen(q, count); + if (p_size < p_len + copy_len + 1) + fortify_panic(__func__); + __builtin_memcpy(p + p_len, q, copy_len); + p[p_len + copy_len] = '\0'; + return p; +} + +__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __write_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __builtin_memset(p, c, size); +} + +__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __builtin_memcpy(p, q, size); +} + +__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (__builtin_constant_p(size)) { + if (p_size < size) + __write_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __builtin_memmove(p, q, size); +} + +extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); +__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memscan(p, c, size); +} + +__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + size_t q_size = __builtin_object_size(q, 0); + if (__builtin_constant_p(size)) { + if (p_size < size) + __read_overflow(); + if (q_size < size) + __read_overflow2(); + } + if (p_size < size || q_size < size) + fortify_panic(__func__); + return __builtin_memcmp(p, q, size); +} + +__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __builtin_memchr(p, c, size); +} + +void *__real_memchr_inv(const void *s, int c, 
size_t n) __RENAME(memchr_inv); +__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_memchr_inv(p, c, size); +} + +extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); +__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp) +{ + size_t p_size = __builtin_object_size(p, 0); + if (__builtin_constant_p(size) && p_size < size) + __read_overflow(); + if (p_size < size) + fortify_panic(__func__); + return __real_kmemdup(p, size, gfp); +} +#endif + #endif /* _LINUX_STRING_H_ */ diff --git a/lib/string.c b/lib/string.c index 1c1fc9187b05..ebbb99c775bd 100644 --- a/lib/string.c +++ b/lib/string.c @@ -978,3 +978,10 @@ char *strreplace(char *s, char old, char new) return s; } EXPORT_SYMBOL(strreplace); + +void fortify_panic(const char *name) +{ + pr_emerg("detected buffer overflow in %s\n", name); + BUG(); +} +EXPORT_SYMBOL(fortify_panic); diff --git a/security/Kconfig b/security/Kconfig index d540bfe73190..e8e449444e65 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -163,6 +163,13 @@ config HARDENED_USERCOPY_PAGESPAN been removed. This config is intended to be used only while trying to find such users. +config FORTIFY_SOURCE + bool "Harden common str/mem functions against buffer overflows" + depends on ARCH_HAS_FORTIFY_SOURCE + help + Detect overflows of buffers in common string and memory functions + where the compiler can determine and validate the buffer sizes. + config STATIC_USERMODEHELPER bool "Force all usermode helper calls through a single binary" help -- cgit v1.2.3-59-g8ed1b From 579e14524c6593cb651a806a6563e14b263c00e2 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jul 2017 14:36:13 -0700 Subject: sh: mark end of BUG() implementation as unreachable When building the sh architecture, the compiler doesn't realize that BUG() doesn't return, so it will complain about functions using BUG() that are marked with the noreturn attribute: lib/string.c: In function 'fortify_panic': >> lib/string.c:986:1: warning: 'noreturn' function does return } ^ Link: http://lkml.kernel.org/r/20170627192050.GA66784@beast Signed-off-by: Kees Cook Cc: Yoshinori Sato Cc: Rich Felker Cc: Daniel Micay Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/include/asm/bug.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h index 1b77f068be2b..c9828f785ca0 100644 --- a/arch/sh/include/asm/bug.h +++ b/arch/sh/include/asm/bug.h @@ -48,6 +48,7 @@ do { \ "i" (__FILE__), \ "i" (__LINE__), "i" (0), \ "i" (sizeof(struct bug_entry))); \ + unreachable(); \ } while (0) #define __WARN_FLAGS(flags) \ -- cgit v1.2.3-59-g8ed1b From 022c204040f3fd22d6445bc35517786195b7ae80 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:17 -0700 Subject: random,stackprotect: introduce get_random_canary function Patch series "stackprotector: ascii armor the stack canary", v2. Zero out the first byte of the stack canary value on 64 bit systems, in order to mitigate unterminated C string overflows. The null byte both prevents C string functions from reading the canary, and from writing it if the canary value were guessed or obtained through some other means. 
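A userspace sketch of the masking (assumes a little-endian 64-bit host; the constant matches the CANARY_MASK introduced below):
-------------------------------------------------------
#include <stdio.h>
#include <string.h>

#define CANARY_MASK 0xffffffffffffff00UL	/* 64-bit little endian */

int main(void)
{
	unsigned long canary = 0x1122334455667788UL & CANARY_MASK;
	unsigned char bytes[sizeof(canary)];

	memcpy(bytes, &canary, sizeof(canary));
	/* the low-order byte is the first byte in memory, i.e. the one
	 * a string overflow runs into first: prints 00 */
	printf("first byte in memory: %02x\n", bytes[0]);
	return 0;
}
-------------------------------------------------------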
Reducing the entropy by 8 bits is acceptable on 64-bit systems, which will still have 56 bits of entropy left, but not on 32 bit systems, so the "ascii armor" canary is only implemented on 64-bit systems. Inspired by the "ascii armor" code in execshield and Daniel Micay's linux-hardened tree. Also see https://github.com/thestinger/linux-hardened/ This patch (of 5): Introduce get_random_canary(), which provides a random unsigned long canary value with the first byte zeroed out on 64 bit architectures, in order to mitigate non-terminated C string overflows. The null byte both prevents C string functions from reading the canary, and from writing it if the canary value were guessed or obtained through some other means. Reducing the entropy by 8 bits is acceptable on 64-bit systems, which will still have 56 bits of entropy left, but not on 32 bit systems, so the "ascii armor" canary is only implemented on 64-bit systems. Inspired by the "ascii armor" code in the old execshield patches, and Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170524155751.424-2-riel@redhat.com Signed-off-by: Rik van Riel Acked-by: Kees Cook Cc: Daniel Micay Cc: "Theodore Ts'o" Cc: H. Peter Anvin Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Catalin Marinas Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/random.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/include/linux/random.h b/include/linux/random.h index ed5c3838780d..1fa0dc880bd7 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -57,6 +57,27 @@ static inline unsigned long get_random_long(void) #endif } +/* + * On 64-bit architectures, protect against non-terminated C string overflows + * by zeroing out the first byte of the canary; this leaves 56 bits of entropy. + */ +#ifdef CONFIG_64BIT +# ifdef __LITTLE_ENDIAN +# define CANARY_MASK 0xffffffffffffff00UL +# else /* big endian, 64 bits: */ +# define CANARY_MASK 0x00ffffffffffffffUL +# endif +#else /* 32 bits: */ +# define CANARY_MASK 0xffffffffUL +#endif + +static inline unsigned long get_random_canary(void) +{ + unsigned long val = get_random_long(); + + return val & CANARY_MASK; +} + unsigned long randomize_page(unsigned long start, unsigned long range); u32 prandom_u32(void); -- cgit v1.2.3-59-g8ed1b From 7cd815bce828220deffd1654265f0ef891567774 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:20 -0700 Subject: fork,random: use get_random_canary() to set tsk->stack_canary Use the ascii-armor canary to prevent unterminated C string overflows from being able to successfully overwrite the canary, even if they somehow obtain the canary value. Inspired by execshield ascii-armor and Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170524155751.424-3-riel@redhat.com Signed-off-by: Rik van Riel Acked-by: Kees Cook Cc: Daniel Micay Cc: "Theodore Ts'o" Cc: H. 
Peter Anvin Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Catalin Marinas Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/fork.c b/kernel/fork.c index ade237a96308..17921b0390b4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -554,7 +554,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) set_task_stack_end_magic(tsk); #ifdef CONFIG_CC_STACKPROTECTOR - tsk->stack_canary = get_random_long(); + tsk->stack_canary = get_random_canary(); #endif /* -- cgit v1.2.3-59-g8ed1b From bf9eb5443844e288b73f7963dfc67a0d2fbc3849 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:23 -0700 Subject: x86: ascii armor the x86_64 boot init stack canary Use the ascii-armor canary to prevent unterminated C string overflows from being able to successfully overwrite the canary, even if they somehow obtain the canary value. Inspired by execshield ascii-armor and Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170524155751.424-4-riel@redhat.com Signed-off-by: Rik van Riel Acked-by: Kees Cook Cc: Daniel Micay Cc: "Theodore Ts'o" Cc: H. Peter Anvin Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Catalin Marinas Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/stackprotector.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index dcbd9bcce714..8abedf1d650e 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -74,6 +74,7 @@ static __always_inline void boot_init_stack_canary(void) get_random_bytes(&canary, sizeof(canary)); tsc = rdtsc(); canary += tsc + (tsc << 32UL); + canary &= CANARY_MASK; current->stack_canary = canary; #ifdef CONFIG_X86_64 -- cgit v1.2.3-59-g8ed1b From d21f54988bb2927341757d42152440c3abcae4eb Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:26 -0700 Subject: arm64: ascii armor the arm64 boot init stack canary Use the ascii-armor canary to prevent unterminated C string overflows from being able to successfully overwrite the canary, even if they somehow obtain the canary value. Inspired by execshield ascii-armor and Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170524155751.424-5-riel@redhat.com Signed-off-by: Rik van Riel Acked-by: Kees Cook Cc: Daniel Micay Cc: "Theodore Ts'o" Cc: H. Peter Anvin Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Catalin Marinas Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/include/asm/stackprotector.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h index fe5e287dc56b..b86a0865ddf1 100644 --- a/arch/arm64/include/asm/stackprotector.h +++ b/arch/arm64/include/asm/stackprotector.h @@ -30,6 +30,7 @@ static __always_inline void boot_init_stack_canary(void) /* Try to get a semi random initial value. 
*/ get_random_bytes(&canary, sizeof(canary)); canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; current->stack_canary = canary; __stack_chk_guard = current->stack_canary; -- cgit v1.2.3-59-g8ed1b From 2c8340eda45cee1784f7b29e6d7dcc740ea3d02a Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:29 -0700 Subject: sh64: ascii armor the sh64 boot init stack canary Use the ascii-armor canary to prevent unterminated C string overflows from being able to successfully overwrite the canary, even if they somehow obtain the canary value. Inspired by execshield ascii-armor and Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170524123446.78510066@annuminas.surriel.com Signed-off-by: Rik van Riel Acked-by: Kees Cook Cc: Daniel Micay Cc: "Theodore Ts'o" Cc: H. Peter Anvin Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Catalin Marinas Cc: Yoshinori Sato Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/include/asm/stackprotector.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/sh/include/asm/stackprotector.h b/arch/sh/include/asm/stackprotector.h index d9df3a76847c..141515a43b78 100644 --- a/arch/sh/include/asm/stackprotector.h +++ b/arch/sh/include/asm/stackprotector.h @@ -19,6 +19,7 @@ static __always_inline void boot_init_stack_canary(void) /* Try to get a semi random initial value. */ get_random_bytes(&canary, sizeof(canary)); canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; current->stack_canary = canary; __stack_chk_guard = current->stack_canary; -- cgit v1.2.3-59-g8ed1b From c204d21f2232d875e36b8774c36ffd027dc1d606 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:33 -0700 Subject: x86/mmap: properly account for stack randomization in mmap_base When RLIMIT_STACK is, for example, 256MB, the current code results in a gap between the top of the task and mmap_base of 256MB, failing to take into account the amount by which the stack address was randomized. In other words, the stack gets less than RLIMIT_STACK space. Ensure that the gap between the stack and mmap_base always takes stack randomization and the stack guard gap into account. Obtained from Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170622200033.25714-2-riel@redhat.com Signed-off-by: Daniel Micay Signed-off-by: Rik van Riel Reported-by: Florian Weimer Acked-by: Ingo Molnar Cc: Will Deacon Cc: Daniel Micay Cc: Benjamin Herrenschmidt Cc: Hugh Dickins Cc: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/mmap.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 797295e792b2..229d04a83f85 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -92,13 +92,18 @@ unsigned long arch_mmap_rnd(void) static unsigned long mmap_base(unsigned long rnd, unsigned long task_size) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap; unsigned long gap_min, gap_max; + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; + /* * Top of mmap area (just below the process stack). * Leave an at least ~128 MB hole with possible stack randomization. 
*/ - gap_min = SIZE_128M + stack_maxrandom_size(task_size); + gap_min = SIZE_128M; gap_max = (task_size / 6) * 5; if (gap < gap_min) -- cgit v1.2.3-59-g8ed1b From cf92251dc52d2dfae6c6adaeae8b12ea50686946 Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:36 -0700 Subject: arm64/mmap: properly account for stack randomization in mmap_base When RLIMIT_STACK is, for example, 256MB, the current code results in a gap between the top of the task and mmap_base of 256MB, failing to take into account the amount by which the stack address was randomized. In other words, the stack gets less than RLIMIT_STACK space. Ensure that the gap between the stack and mmap_base always takes stack randomization and the stack guard gap into account. Obtained from Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170622200033.25714-3-riel@redhat.com Signed-off-by: Daniel Micay Signed-off-by: Rik van Riel Reported-by: Florian Weimer Cc: Ingo Molnar Cc: Will Deacon Cc: Daniel Micay Cc: Benjamin Herrenschmidt Cc: Hugh Dickins Cc: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm64/mm/mmap.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index adc208c2ae9c..decccffb03ca 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -35,7 +35,7 @@ * Leave enough space between the mmap area and the stack to honour ulimit in * the face of randomisation. */ -#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1)) +#define MIN_GAP (SZ_128M) #define MAX_GAP (STACK_TOP/6*5) static int mmap_is_legacy(void) @@ -65,6 +65,11 @@ unsigned long arch_mmap_rnd(void) static unsigned long mmap_base(unsigned long rnd) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap; + + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; -- cgit v1.2.3-59-g8ed1b From 0a782dc31f4d867921eb7caf1f4bb1222e45bb0e Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Wed, 12 Jul 2017 14:36:39 -0700 Subject: powerpc,mmap: properly account for stack randomization in mmap_base When RLIMIT_STACK is, for example, 256MB, the current code results in a gap between the top of the task and mmap_base of 256MB, failing to take into account the amount by which the stack address was randomized. In other words, the stack gets less than RLIMIT_STACK space. Ensure that the gap between the stack and mmap_base always takes stack randomization and the stack guard gap into account. Inspired by Daniel Micay's linux-hardened tree. Link: http://lkml.kernel.org/r/20170622200033.25714-4-riel@redhat.com Signed-off-by: Rik van Riel Reported-by: Florian Weimer Cc: Ingo Molnar Cc: Will Deacon Cc: Daniel Micay Cc: Benjamin Herrenschmidt Cc: Hugh Dickins Cc: Catalin Marinas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/mm/mmap.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 0ee6be4f1ba4..5d78b193fec4 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -34,16 +34,9 @@ /* * Top of mmap area (just below the process stack). * - * Leave at least a ~128 MB hole on 32bit applications. 
- * - * On 64bit applications we randomise the stack by 1GB so we need to - * space our mmap start address by a further 1GB, otherwise there is a - * chance the mmap area will end up closer to the stack than our ulimit - * requires. + * Leave at least a ~128 MB hole. */ -#define MIN_GAP32 (128*1024*1024) -#define MIN_GAP64 ((128 + 1024)*1024*1024UL) -#define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64) +#define MIN_GAP (128*1024*1024) #define MAX_GAP (TASK_SIZE/6*5) static inline int mmap_is_legacy(void) @@ -71,9 +64,26 @@ unsigned long arch_mmap_rnd(void) return rnd << PAGE_SHIFT; } +static inline unsigned long stack_maxrandom_size(void) +{ + if (!(current->flags & PF_RANDOMIZE)) + return 0; + + /* 8MB for 32bit, 1GB for 64bit */ + if (is_32bit_task()) + return (1<<23); + else + return (1<<30); +} + static inline unsigned long mmap_base(unsigned long rnd) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pad = stack_maxrandom_size() + stack_guard_gap; + + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; -- cgit v1.2.3-59-g8ed1b From 473738eb78c3e379d682fb8a3cf7e1d17beded9f Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 12 Jul 2017 14:36:42 -0700 Subject: MIPS: do not use __GFP_REPEAT for order-0 request Patch series "mm: give __GFP_REPEAT a better semantic". The main motivation for the change is that the current implementation of __GFP_REPEAT is not very useful. The documentation says: * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt * _might_ fail. This depends upon the particular VM implementation. It just fails to mention that this is true only for large (costly) high-order requests, which has been the case since the flag was introduced. A similar semantic would be really helpful for small orders as well, though, because we have places where a failure with a specific fallback error handling is preferred to a potential endless loop inside the page allocator. The earlier cleanup dropped __GFP_REPEAT usage for low (!costly) order users, so only those which might use larger orders have stayed. One new user added in the meantime is addressed in patch 1. Let's rename the flag to something more verbose and use it for existing users. The semantic for those will not change. Then implement a failure path for low (!costly) orders which is hit once the page allocator is about to invoke the oom killer. With that we have a good counterpart for __GFP_NORETRY and can finally ask for trying as hard as possible without invoking the OOM killer. Xfs code already has an existing annotation for allocations which are allowed to fail and we can trivially map them to the new gfp flag because it will provide the semantic KM_MAYFAIL wants. Christoph didn't consider the new flag really necessary but didn't respond to the OOM killer aspect of the change, so I have kept the patch. If this is still seen as not really needed I can drop the patch. kvmalloc will also allow !costly high-order allocations to retry hard before falling back to vmalloc. drm/i915 asked for the new semantic explicitly. Memory migration code, especially for memory hotplug, should back off rather than invoke the OOM killer as well. This patch (of 6): Commit 3377e227af44 ("MIPS: Add 48-bit VA space (and 4-level page tables) for 4K pages.") has added a new __GFP_REPEAT user, but using this flag doesn't really make any sense for an order-0 request, which is the case here because PUD_ORDER is 0.
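As a rough illustration of the boundary in play (PAGE_ALLOC_COSTLY_ORDER is the allocator's real threshold; the helper itself is hypothetical and only for exposition):

#include <linux/mmzone.h>	/* defines PAGE_ALLOC_COSTLY_ORDER (3) */

/*
 * Hypothetical helper: __GFP_REPEAT historically changed behaviour only
 * for "costly" requests, i.e. order > 3 (more than eight contiguous
 * pages). An order-0 request such as the PUD allocation in the hunk
 * below is treated as too important to fail with or without the flag.
 */
static inline bool gfp_repeat_would_matter(unsigned int order)
{
	return order > PAGE_ALLOC_COSTLY_ORDER;
}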
__GFP_REPEAT has historically had an effect only on allocation requests with order > PAGE_ALLOC_COSTLY_ORDER. This doesn't introduce any functional change. This is a preparatory patch for later work which renames the flag and redefines its semantic. Link: http://lkml.kernel.org/r/20170623085345.11304-2-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Alex Belits Cc: David Daney Cc: Ralf Baechle Cc: Johannes Weiner Cc: Mel Gorman Cc: NeilBrown Cc: Christoph Hellwig Cc: Chris Wilson Cc: Darrick J. Wong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mips/include/asm/pgalloc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index a1bdb1ea5234..39b9f311c4ef 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -116,7 +116,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { pud_t *pud; - pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER); + pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER); if (pud) pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); return pud; -- cgit v1.2.3-59-g8ed1b From dcda9b04713c3f6ff0875652924844fae28286ea Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 12 Jul 2017 14:36:45 -0700 Subject: mm, tree wide: replace __GFP_REPEAT by __GFP_RETRY_MAYFAIL with more useful semantic __GFP_REPEAT was designed to allow retry-but-eventually-fail semantic to the page allocator. This has been true, but only for allocation requests larger than PAGE_ALLOC_COSTLY_ORDER. It has always been ignored for smaller sizes. This is a bit unfortunate because there is no way to express the same semantic for those requests, and they are considered too important to fail, so they might end up looping in the page allocator forever, similarly to GFP_NOFAIL requests. Now that the whole tree has been cleaned up and accidental or misled usage of the __GFP_REPEAT flag has been removed for !costly requests, we can give the original flag a better name and, more importantly, a more useful semantic. Let's rename it to __GFP_RETRY_MAYFAIL, which tells the user that the allocator will try really hard but there is no promise of success. This works independently of the order and overrides the default allocator behavior. Page allocator users have several levels of guarantee vs. cost options (take GFP_KERNEL as an example) - GFP_KERNEL & ~__GFP_RECLAIM - optimistic allocation without _any_ attempt to free memory at all. The most lightweight mode, which doesn't even kick background reclaim. Should be used carefully because it might deplete the memory and the next user might hit the more aggressive reclaim - GFP_KERNEL & ~__GFP_DIRECT_RECLAIM (or GFP_NOWAIT) - optimistic allocation without any attempt to free memory from the current context, but can wake kswapd to reclaim memory if the zone is below the low watermark. Can be used from either atomic contexts or when the request is a performance optimization and there is another fallback for a slow path. - (GFP_KERNEL|__GFP_HIGH) & ~__GFP_DIRECT_RECLAIM (aka GFP_ATOMIC) - non-sleeping allocation with an expensive fallback so it can access some portion of memory reserves. Usually used from interrupt/bh context with an expensive slow path fallback. - GFP_KERNEL - both background and direct reclaim are allowed and the _default_ page allocator behavior is used.
That means that !costly allocation requests are basically nofail but there is no guarantee of that behavior so failures have to be checked properly by callers (e.g. OOM killer victim is allowed to fail currently). - GFP_KERNEL | __GFP_NORETRY - overrides the default allocator behavior and all allocation requests fail early rather than cause disruptive reclaim (one round of reclaim in this implementation). The OOM killer is not invoked. - GFP_KERNEL | __GFP_RETRY_MAYFAIL - overrides the default allocator behavior and all allocation requests try really hard. The request will fail if the reclaim cannot make any progress. The OOM killer won't be triggered. - GFP_KERNEL | __GFP_NOFAIL - overrides the default allocator behavior and all allocation requests will loop endlessly until they succeed. This might be really dangerous especially for larger orders. Existing users of __GFP_REPEAT are changed to __GFP_RETRY_MAYFAIL because they already had their semantic. No new users are added. __alloc_pages_slowpath is changed to bail out for __GFP_RETRY_MAYFAIL if there is no progress and we have already passed the OOM point. This means that all the reclaim opportunities have been exhausted except the most disruptive one (the OOM killer) and a user defined fallback behavior is more sensible than keep retrying in the page allocator. [akpm@linux-foundation.org: fix arch/sparc/kernel/mdesc.c] [mhocko@suse.com: semantic fix] Link: http://lkml.kernel.org/r/20170626123847.GM11534@dhcp22.suse.cz [mhocko@kernel.org: address other thing spotted by Vlastimil] Link: http://lkml.kernel.org/r/20170626124233.GN11534@dhcp22.suse.cz Link: http://lkml.kernel.org/r/20170623085345.11304-3-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Alex Belits Cc: Chris Wilson Cc: Christoph Hellwig Cc: Darrick J. Wong Cc: David Daney Cc: Johannes Weiner Cc: Mel Gorman Cc: NeilBrown Cc: Ralf Baechle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/DMA-ISA-LPC.txt | 2 +- arch/powerpc/include/asm/book3s/64/pgalloc.h | 2 +- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 +- arch/sparc/kernel/mdesc.c | 2 +- drivers/mmc/host/wbsd.c | 2 +- drivers/s390/char/vmcp.c | 2 +- drivers/target/target_core_transport.c | 2 +- drivers/vhost/net.c | 2 +- drivers/vhost/scsi.c | 2 +- drivers/vhost/vsock.c | 2 +- include/linux/gfp.h | 56 +++++++++++++++++++++------- include/linux/slab.h | 3 +- include/trace/events/mmflags.h | 2 +- mm/hugetlb.c | 4 +- mm/internal.h | 2 +- mm/page_alloc.c | 14 +++++-- mm/sparse-vmemmap.c | 4 +- mm/util.c | 6 +-- mm/vmalloc.c | 2 +- mm/vmscan.c | 8 ++-- net/core/dev.c | 6 +-- net/core/skbuff.c | 2 +- net/sched/sch_fq.c | 2 +- tools/perf/builtin-kmem.c | 2 +- 24 files changed, 86 insertions(+), 47 deletions(-) diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt index c41331398752..7a065ac4a9d1 100644 --- a/Documentation/DMA-ISA-LPC.txt +++ b/Documentation/DMA-ISA-LPC.txt @@ -42,7 +42,7 @@ requirements you pass the flag GFP_DMA to kmalloc. Unfortunately the memory available for ISA DMA is scarce so unless you allocate the memory during boot-up it's a good idea to also pass -__GFP_REPEAT and __GFP_NOWARN to make the allocator try a bit harder. +__GFP_RETRY_MAYFAIL and __GFP_NOWARN to make the allocator try a bit harder. (This scarcity also means that you should allocate the buffer as early as possible and not release it until the driver is unloaded.) 
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 20b1485ff1e8..e2329db9d6f4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm) return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP)); #else struct page *page; - page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT), + page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL), 4); if (!page) return NULL; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 710e491206ed..8cb0190e2a73 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -93,7 +93,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order) } if (!hpt) - hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT + hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL |__GFP_NOWARN, order - PAGE_SHIFT); if (!hpt) diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index e4b4e790bf89..fa466ce45bc9 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -205,7 +205,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) handle_size = (sizeof(struct mdesc_handle) - sizeof(struct mdesc_hdr) + mdesc_size); - base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_REPEAT); + base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!base) return NULL; diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index e15a9733fcfd..9668616faf16 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1386,7 +1386,7 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma) * order for ISA to be able to DMA to it. 
*/ host->dma_buffer = kmalloc(WBSD_DMA_SIZE, - GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN); + GFP_NOIO | GFP_DMA | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (!host->dma_buffer) goto free; diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 65f5a794f26d..98749fa817da 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -98,7 +98,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count, } if (!session->response) session->response = (char *)__get_free_pages(GFP_KERNEL - | __GFP_REPEAT | GFP_DMA, + | __GFP_RETRY_MAYFAIL | GFP_DMA, get_order(session->bufsize)); if (!session->response) { mutex_unlock(&session->mutex); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f1b3a46bdcaf..1bdc10651bcd 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -252,7 +252,7 @@ int transport_alloc_session_tags(struct se_session *se_sess, int rc; se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, - GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); if (!se_sess->sess_cmd_map) { se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); if (!se_sess->sess_cmd_map) { diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e3d7ea1288c6..06d044862e58 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -897,7 +897,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) struct sk_buff **queue; int i; - n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT); + n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!n) return -ENOMEM; vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index fd6c8b66f06f..ff02a942c4d5 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1404,7 +1404,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) struct vhost_virtqueue **vqs; int r = -ENOMEM, i; - vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); if (!vs) { vs = vzalloc(sizeof(*vs)); if (!vs) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 3f63e03de8e8..c9de9c41aa97 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -508,7 +508,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) /* This struct is large and allocation could fail, fall back to vmalloc * if there is no other way. */ - vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_REPEAT); + vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!vsock) return -ENOMEM; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 4c6656f1fee7..bcfb9f7c46f5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -25,7 +25,7 @@ struct vm_area_struct; #define ___GFP_FS 0x80u #define ___GFP_COLD 0x100u #define ___GFP_NOWARN 0x200u -#define ___GFP_REPEAT 0x400u +#define ___GFP_RETRY_MAYFAIL 0x400u #define ___GFP_NOFAIL 0x800u #define ___GFP_NORETRY 0x1000u #define ___GFP_MEMALLOC 0x2000u @@ -136,26 +136,56 @@ struct vm_area_struct; * * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * - * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt - * _might_ fail. This depends upon the particular VM implementation. + * The default allocator behavior depends on the request size. 
We have a concept + * of so called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER). + * !costly allocations are too essential to fail so they are implicitly + * non-failing by default (with some exceptions like OOM victims might fail so + * the caller still has to check for failures) while costly requests try to be + * not disruptive and back off even without invoking the OOM killer. + * The following three modifiers might be used to override some of these + * implicit rules + * + * __GFP_NORETRY: The VM implementation will try only very lightweight + * memory direct reclaim to get some memory under memory pressure (thus + * it can sleep). It will avoid disruptive actions like OOM killer. The + * caller must handle the failure which is quite likely to happen under + * heavy memory pressure. The flag is suitable when failure can easily be + * handled at small cost, such as reduced throughput + * + * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim + * procedures that have previously failed if there is some indication + * that progress has been made else where. It can wait for other + * tasks to attempt high level approaches to freeing memory such as + * compaction (which removes fragmentation) and page-out. + * There is still a definite limit to the number of retries, but it is + * a larger limit than with __GFP_NORETRY. + * Allocations with this flag may fail, but only when there is + * genuinely little unused memory. While these allocations do not + * directly trigger the OOM killer, their failure indicates that + * the system is likely to need to use the OOM killer soon. The + * caller must handle failure, but can reasonably do so by failing + * a higher-level request, or completing it only in a much less + * efficient manner. + * If the allocation does fail, and the caller is in a position to + * free some non-essential memory, doing so could benefit the system + * as a whole. * * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. New users should be evaluated carefully - * (and the flag should be used only when there is no reasonable failure - * policy) but it is definitely preferable to use the flag rather than - * opencode endless loop around allocator. - * - * __GFP_NORETRY: The VM implementation must not retry indefinitely and will - * return NULL when direct reclaim and memory compaction have failed to allow - * the allocation to succeed. The OOM killer is not called with the current - * implementation. + * cannot handle allocation failures. The allocation could block + * indefinitely but will never return with failure. Testing for + * failure is pointless. + * New users should be evaluated carefully (and the flag should be + * used only when there is no reasonable failure policy) but it is + * definitely preferable to use the flag rather than opencode endless + * loop around allocator. + * Using this flag for costly allocations is _highly_ discouraged. 
*/ #define __GFP_IO ((__force gfp_t)___GFP_IO) #define __GFP_FS ((__force gfp_t)___GFP_FS) #define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ #define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) -#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) +#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) diff --git a/include/linux/slab.h b/include/linux/slab.h index 04a7f7993e67..41473df6dfb0 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -471,7 +471,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) * * %__GFP_NOWARN - If allocation fails, don't issue any warnings. * - * %__GFP_REPEAT - If allocation fails initially, try once more before failing. + * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail + * eventually. * * There are other flags available as well, but these are not intended * for general use, and so are not documented here. For a full list of diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 10e3663a75a6..8e50d01c645f 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -34,7 +34,7 @@ {(unsigned long)__GFP_FS, "__GFP_FS"}, \ {(unsigned long)__GFP_COLD, "__GFP_COLD"}, \ {(unsigned long)__GFP_NOWARN, "__GFP_NOWARN"}, \ - {(unsigned long)__GFP_REPEAT, "__GFP_REPEAT"}, \ + {(unsigned long)__GFP_RETRY_MAYFAIL, "__GFP_RETRY_MAYFAIL"}, \ {(unsigned long)__GFP_NOFAIL, "__GFP_NOFAIL"}, \ {(unsigned long)__GFP_NORETRY, "__GFP_NORETRY"}, \ {(unsigned long)__GFP_COMP, "__GFP_COMP"}, \ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1e516520433d..bc48ee783dd9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1384,7 +1384,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) page = __alloc_pages_node(nid, htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE| - __GFP_REPEAT|__GFP_NOWARN, + __GFP_RETRY_MAYFAIL|__GFP_NOWARN, huge_page_order(h)); if (page) { prep_new_huge_page(h, page, nid); @@ -1525,7 +1525,7 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h, { int order = huge_page_order(h); - gfp_mask |= __GFP_COMP|__GFP_REPEAT|__GFP_NOWARN; + gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN; if (nid == NUMA_NO_NODE) nid = numa_mem_id(); return __alloc_pages_nodemask(gfp_mask, order, nid, nmask); diff --git a/mm/internal.h b/mm/internal.h index 0e4f558412fb..24d88f084705 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -23,7 +23,7 @@ * hints such as HIGHMEM usage. */ #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ - __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ + __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\ __GFP_ATOMIC) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 64b7d82a9b1a..6d30e914afb6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3284,6 +3284,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, /* The OOM killer will not help higher order allocs */ if (order > PAGE_ALLOC_COSTLY_ORDER) goto out; + /* + * We have already exhausted all our reclaim opportunities without any + * success so it is time to admit defeat. 
We will skip the OOM killer + * because it is very likely that the caller has a more reasonable + * fallback than shooting a random task. + */ + if (gfp_mask & __GFP_RETRY_MAYFAIL) + goto out; /* The OOM killer does not needlessly kill tasks for lowmem */ if (ac->high_zoneidx < ZONE_NORMAL) goto out; @@ -3413,7 +3421,7 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, } /* - * !costly requests are much more important than __GFP_REPEAT + * !costly requests are much more important than __GFP_RETRY_MAYFAIL * costly ones because they are de facto nofail and invoke OOM * killer to move on while costly can fail and users are ready * to cope with that. 1/4 retries is rather arbitrary but we @@ -3920,9 +3928,9 @@ retry: /* * Do not retry costly high order allocations unless they are - * __GFP_REPEAT + * __GFP_RETRY_MAYFAIL */ - if (costly_order && !(gfp_mask & __GFP_REPEAT)) + if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) goto nopage; if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index a56c3989f773..c50b1a14d55e 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -56,11 +56,11 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node) if (node_state(node, N_HIGH_MEMORY)) page = alloc_pages_node( - node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT, + node, GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL, get_order(size)); else page = alloc_pages( - GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT, + GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL, get_order(size)); if (page) return page_address(page); diff --git a/mm/util.c b/mm/util.c index 26be6407abd7..6520f2d4a226 100644 --- a/mm/util.c +++ b/mm/util.c @@ -339,7 +339,7 @@ EXPORT_SYMBOL(vm_mmap); * Uses kmalloc to get the memory but if the allocation fails then falls back * to the vmalloc allocator. Use kvfree for freeing the memory. * - * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_REPEAT + * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_RETRY_MAYFAIL * is supported only for large (>32kB) allocations, and it should be used only if * kmalloc is preferable to the vmalloc fallback, due to visible performance drawbacks. * @@ -367,11 +367,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) kmalloc_flags |= __GFP_NOWARN; /* - * We have to override __GFP_REPEAT by __GFP_NORETRY for !costly + * We have to override __GFP_RETRY_MAYFAIL by __GFP_NORETRY for !costly * requests because there is no other way to tell the allocator * that we want to fail rather than retry endlessly. */ - if (!(kmalloc_flags & __GFP_REPEAT) || + if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL) || (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) kmalloc_flags |= __GFP_NORETRY; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 6016ab079e2b..8698c1c86c4d 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1795,7 +1795,7 @@ fail: * allocator with @gfp_mask flags. Map them into contiguous * kernel virtual space, using a pagetable protection of @prot. 
* - * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_REPEAT + * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL * and __GFP_NOFAIL are not supported * * Any use of gfp flags outside of GFP_KERNEL should be consulted diff --git a/mm/vmscan.c b/mm/vmscan.c index e9210f825219..a1af041930a6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2506,18 +2506,18 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, return false; /* Consider stopping depending on scan and reclaim activity */ - if (sc->gfp_mask & __GFP_REPEAT) { + if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) { /* - * For __GFP_REPEAT allocations, stop reclaiming if the + * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the * full LRU list has been scanned and we are still failing * to reclaim pages. This full LRU scan is potentially - * expensive but a __GFP_REPEAT caller really wants to succeed + * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed */ if (!nr_reclaimed && !nr_scanned) return false; } else { /* - * For non-__GFP_REPEAT allocations which can presumably + * For non-__GFP_RETRY_MAYFAIL allocations which can presumably * fail without consequence, stop if we failed to reclaim * any pages from the last SWAP_CLUSTER_MAX number of * pages that were scanned. This will return to the diff --git a/net/core/dev.c b/net/core/dev.c index 02440518dd69..8515f8fe0460 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7384,7 +7384,7 @@ static int netif_alloc_rx_queues(struct net_device *dev) BUG_ON(count < 1); - rx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT); + rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!rx) return -ENOMEM; @@ -7424,7 +7424,7 @@ static int netif_alloc_netdev_queues(struct net_device *dev) if (count < 1 || count > 0xffff) return -EINVAL; - tx = kvzalloc(sz, GFP_KERNEL | __GFP_REPEAT); + tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!tx) return -ENOMEM; @@ -7965,7 +7965,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, /* ensure 32-byte alignment of whole construct */ alloc_size += NETDEV_ALIGN - 1; - p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_REPEAT); + p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!p) return NULL; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8b11341ed69a..f990eb8b30a9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4747,7 +4747,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, gfp_head = gfp_mask; if (gfp_head & __GFP_DIRECT_RECLAIM) - gfp_head |= __GFP_REPEAT; + gfp_head |= __GFP_RETRY_MAYFAIL; *errcode = -ENOBUFS; skb = alloc_skb(header_len, gfp_head); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 147fde73a0f5..263d16e3219e 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -648,7 +648,7 @@ static int fq_resize(struct Qdisc *sch, u32 log) return 0; /* If XPS was setup, we can allocate memory on right NUMA node */ - array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_REPEAT, + array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL, netdev_queue_numa_node_read(sch->dev_queue)); if (!array) return -ENOMEM; diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 0a8a1c45af87..a1497c516d85 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -643,7 +643,7 @@ static const struct { { "__GFP_FS", "F" }, { "__GFP_COLD", "CO" }, { "__GFP_NOWARN", "NWR" }, - { "__GFP_REPEAT", "R" }, + { "__GFP_RETRY_MAYFAIL", "R" }, { 
"__GFP_NOFAIL", "NF" }, { "__GFP_NORETRY", "NR" }, { "__GFP_COMP", "C" }, -- cgit v1.2.3-59-g8ed1b From 91c63ecda75d3004da8ffd5d2590383e0ba47722 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 12 Jul 2017 14:36:49 -0700 Subject: xfs: map KM_MAYFAIL to __GFP_RETRY_MAYFAIL KM_MAYFAIL didn't have any suitable GFP_FOO counterpart until recently so it relied on the default page allocator behavior for the given set of flags. This means that small allocations actually never failed. Now that we have __GFP_RETRY_MAYFAIL flag which works independently on the allocation request size we can map KM_MAYFAIL to it. The allocator will try as hard as it can to fulfill the request but fails eventually if the progress cannot be made. It does so without triggering the OOM killer which can be seen as an improvement because KM_MAYFAIL users should be able to deal with allocation failures. Link: http://lkml.kernel.org/r/20170623085345.11304-4-mhocko@kernel.org Signed-off-by: Michal Hocko Cc: Darrick J. Wong Cc: Christoph Hellwig Cc: Alex Belits Cc: Chris Wilson Cc: David Daney Cc: Johannes Weiner Cc: Mel Gorman Cc: NeilBrown Cc: Ralf Baechle Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/xfs/kmem.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index d6ea520162b2..4d85992d75b2 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h @@ -54,6 +54,16 @@ kmem_flags_convert(xfs_km_flags_t flags) lflags &= ~__GFP_FS; } + /* + * Default page/slab allocator behavior is to retry for ever + * for small allocations. We can override this behavior by using + * __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long + * as it is feasible but rather fail than retry forever for all + * request sizes. + */ + if (flags & KM_MAYFAIL) + lflags |= __GFP_RETRY_MAYFAIL; + if (flags & KM_ZERO) lflags |= __GFP_ZERO; -- cgit v1.2.3-59-g8ed1b From cc965a29db172c28e25b9742db86a85766a08bf5 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 12 Jul 2017 14:36:52 -0700 Subject: mm: kvmalloc support __GFP_RETRY_MAYFAIL for all sizes Now that __GFP_RETRY_MAYFAIL has a reasonable semantic regardless of the request size we can drop the hackish implementation for !costly orders. __GFP_RETRY_MAYFAIL retries as long as the reclaim makes a forward progress and backs of when we are out of memory for the requested size. Therefore we do not need to enforce__GFP_NORETRY for !costly orders just to silent the oom killer anymore. Link: http://lkml.kernel.org/r/20170623085345.11304-5-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Alex Belits Cc: Chris Wilson Cc: Christoph Hellwig Cc: Darrick J. Wong Cc: David Daney Cc: Johannes Weiner Cc: Mel Gorman Cc: NeilBrown Cc: Ralf Baechle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/util.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/mm/util.c b/mm/util.c index 6520f2d4a226..ee250e2cde34 100644 --- a/mm/util.c +++ b/mm/util.c @@ -339,9 +339,9 @@ EXPORT_SYMBOL(vm_mmap); * Uses kmalloc to get the memory but if the allocation fails then falls back * to the vmalloc allocator. Use kvfree for freeing the memory. * - * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. __GFP_RETRY_MAYFAIL - * is supported only for large (>32kB) allocations, and it should be used only if - * kmalloc is preferable to the vmalloc fallback, due to visible performance drawbacks. 
+ * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported. + * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is + * preferable to the vmalloc fallback, due to visible performance drawbacks. * * Any use of gfp flags outside of GFP_KERNEL should be consulted with mm people. */ @@ -366,13 +366,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) if (size > PAGE_SIZE) { kmalloc_flags |= __GFP_NOWARN; - /* - * We have to override __GFP_RETRY_MAYFAIL by __GFP_NORETRY for !costly - * requests because there is no other way to tell the allocator - * that we want to fail rather than retry endlessly. - */ - if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL) || - (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL)) kmalloc_flags |= __GFP_NORETRY; } -- cgit v1.2.3-59-g8ed1b From dbb329561ae9ccfb942b6ba330030a4654e8908e Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 12 Jul 2017 14:36:55 -0700 Subject: drm/i915: use __GFP_RETRY_MAYFAIL Commit 24f8e00a8a2e ("drm/i915: Prefer to report ENOMEM rather than incur the oom for gfx allocations") tried to remove the disruptive OOM killer because userspace should be able to cope with allocation failures. At the time only __GFP_NORETRY could achieve that, and it turned out that this would fail the allocations just too easily. So "drm/i915: Remove __GFP_NORETRY from our buffer allocator" removed it and hoped for a better solution. __GFP_RETRY_MAYFAIL is that solution. It will keep retrying the allocation until there is no more progress and we would go OOM. Instead we fail the allocation and let the caller deal with it. Link: http://lkml.kernel.org/r/20170623085345.11304-6-mhocko@kernel.org Signed-off-by: Michal Hocko Cc: Chris Wilson Cc: Alex Belits Cc: Christoph Hellwig Cc: Darrick J. Wong Cc: David Daney Cc: Johannes Weiner Cc: Mel Gorman Cc: NeilBrown Cc: Ralf Baechle Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/gpu/drm/i915/i915_gem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7dcac3bfb771..969bac8404f1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2434,8 +2434,9 @@ rebuild_st: * again with !__GFP_NORETRY. However, we still * want to fail this allocation rather than * trigger the out-of-memory killer and for - * this we want the future __GFP_MAYFAIL. + * this we want __GFP_RETRY_MAYFAIL. */ + gfp |= __GFP_RETRY_MAYFAIL; } } while (1); -- cgit v1.2.3-59-g8ed1b From 0f55685627d6dd2beda55a82abc02297f0f8e5c2 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 12 Jul 2017 14:36:58 -0700 Subject: mm, migration: do not trigger OOM killer when migrating memory Page migration (for memory hotplug, soft_offline_page or mbind) needs to allocate new memory. This can trigger an oom killer if the target memory is depleted. Although quite unlikely, it is still possible, especially for memory hotplug (offlining of memory). Up to now we didn't really have a reasonable means to back off. __GFP_NORETRY can fail just too easily and __GFP_THISNODE sticks to a single node, and that is not suitable for all callers. But now that we have __GFP_RETRY_MAYFAIL we should use it. It is preferable to fail the migration rather than to disrupt the system by killing some processes.
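A minimal caller-side sketch of what this buys us (the surrounding variables are hypothetical; new_page_nodemask() is the helper touched by the hunk below): the target-page allocation now tries hard but returns NULL on genuine memory exhaustion instead of invoking the OOM killer, and the migration path simply reports the failure:

/* gfp_mask includes __GFP_RETRY_MAYFAIL, so a NULL return is expected
 * under real memory pressure and must be handled by the caller. */
struct page *newpage = new_page_nodemask(page, preferred_nid, nodemask);
if (!newpage)
	return -ENOMEM;	/* back off rather than shoot a random task */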
Link: http://lkml.kernel.org/r/20170623085345.11304-7-mhocko@kernel.org Signed-off-by: Michal Hocko Acked-by: Vlastimil Babka Cc: Alex Belits Cc: Chris Wilson Cc: Christoph Hellwig Cc: Darrick J. Wong Cc: David Daney Cc: Johannes Weiner Cc: Mel Gorman Cc: NeilBrown Cc: Ralf Baechle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 2 +- mm/mempolicy.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 4634da521238..3e0d405dc842 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -34,7 +34,7 @@ extern char *migrate_reason_names[MR_TYPES]; static inline struct page *new_page_nodemask(struct page *page, int preferred_nid, nodemask_t *nodemask) { - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; + gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; if (PageHuge(page)) return alloc_huge_page_nodemask(page_hstate(compound_head(page)), diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 7d8e56214ac0..d911fa5cb2a7 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1078,7 +1078,8 @@ static struct page *new_page(struct page *page, unsigned long start, int **x) /* * if !vma, alloc_page_vma() will use task or system default policy */ - return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL, + vma, address); } #else -- cgit v1.2.3-59-g8ed1b From 596ed45b5b5b7e4624c813ddeffe0e100f8b13ba Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:02 -0700 Subject: checkpatch: improve the STORAGE_CLASS test Make sure static, extern, and asmlinkage appear before a specific type. e.g.: int asmlinkage foo(void) is better written asmlinkage int foo(void) Link: http://lkml.kernel.org/r/31704c96df2d5fd9df0b41165940a7a4feb16a63.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 8f940c09918f..2287a0bca863 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5576,10 +5576,18 @@ sub process { "architecture specific defines should be avoided\n" . $herecurr); } +# check that the storage class is not after a type + if ($line =~ /\b($Type)\s+($Storage)\b/) { + WARN("STORAGE_CLASS", + "storage class '$2' should be located before type '$1'\n" . $herecurr); + } # Check that the storage class is at the beginning of a declaration - if ($line =~ /\b$Storage\b/ && $line !~ /^.\s*$Storage\b/) { + if ($line =~ /\b$Storage\b/ && + $line !~ /^.\s*$Storage/ && + $line =~ /^.\s*(.+?)\$Storage\s/ && + $1 !~ /[\,\)]\s*$/) { WARN("STORAGE_CLASS", - "storage class should be at the beginning of the declaration\n" . $herecurr) + "storage class should be at the beginning of the declaration\n" . $herecurr); } # check the location of the inline attribute, that it is between -- cgit v1.2.3-59-g8ed1b From ead9fba6b8e9a81107807f5a221576cb01206a6e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:05 -0700 Subject: ARM: KVM: move asmlinkage before type asmlinkage is either 'extern "C"' or blank. Move the uses of asmlinkage before the return types to be similar to the rest of the kernel. 
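Taking one declaration from the hunk below, the change is purely mechanical:

/* before: linkage annotation after the return type */
int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host);

/* after: annotation first, matching the rest of the kernel */
asmlinkage int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host);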
Link: http://lkml.kernel.org/r/005b8e120650c6a13b541e420f4e3605603fe9e6.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Christoffer Dall Cc: Marc Zyngier Cc: Paolo Bonzini Cc: Radim Krcmar Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/kvm_hyp.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index 58508900c4bb..14b5903f0224 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h @@ -110,8 +110,8 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt); void __vgic_v3_save_state(struct kvm_vcpu *vcpu); void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); -void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); -void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); +asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp); +asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp); static inline bool __vfp_enabled(void) { return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); @@ -120,8 +120,8 @@ static inline bool __vfp_enabled(void) void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); -int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, +asmlinkage int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host); -int asmlinkage __hyp_do_panic(const char *, int, u32); +asmlinkage int __hyp_do_panic(const char *, int, u32); #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3-59-g8ed1b From 1e90d0ed327a289c6fee4f36b25f67d3d7864a43 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:08 -0700 Subject: ARM: HP Jornada 7XX: move inline before return type Convert 'u8 inline' to 'inline u8' to be the same style used by the rest of the kernel. Miscellanea: jornada_ssp_reverse is an odd function. It is declared inline but is also EXPORT_SYMBOL. It is also apparently only used by jornada720_ssp.c. Likely the EXPORT_SYMBOL could be removed and the function converted to static. The addition of static and removal of EXPORT_SYMBOL was not done. Link: http://lkml.kernel.org/r/5bd3b2bf39c6c9caf773949f18158f8f5ec08582.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mach-sa1100/jornada720_ssp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c index b143c4659346..7fc11a3c17b4 100644 --- a/arch/arm/mach-sa1100/jornada720_ssp.c +++ b/arch/arm/mach-sa1100/jornada720_ssp.c @@ -33,7 +33,7 @@ static unsigned long jornada_ssp_flags; * we need to reverse all data we receive from the mcu due to its physical location * returns : 01110111 -> 11101110 */ -u8 inline jornada_ssp_reverse(u8 byte) +inline u8 jornada_ssp_reverse(u8 byte) { return ((0x80 & byte) >> 7) | -- cgit v1.2.3-59-g8ed1b From 8d95a3dca060b09ddafa4cd1c82f27ab19490fad Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:10 -0700 Subject: CRIS: gpio: move inline before return type Move inline to be like the rest of the kernel.
Link: http://lkml.kernel.org/r/6bf1bec049897c4158f698b866810f47c728f233.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Mikael Starvik Cc: Jesper Nilsson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/cris/arch-v10/drivers/gpio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c index 64285e0d3481..dfd3b3ba5e4e 100644 --- a/arch/cris/arch-v10/drivers/gpio.c +++ b/arch/cris/arch-v10/drivers/gpio.c @@ -399,7 +399,7 @@ out: /* Main device API. ioctl's to read/set/clear bits, as well as to * set alarms to wait for using a subsequent select(). */ -unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg) +inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg) { /* Set direction 0=unchanged 1=input, * return mask with 1=input */ @@ -450,7 +450,7 @@ unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg) return dir_g_in_bits; } /* setget_input */ -unsigned long inline setget_output(struct gpio_private *priv, unsigned long arg) +inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg) { if (USE_PORTS(priv)) { *priv->dir = *priv->dir_shadow |= -- cgit v1.2.3-59-g8ed1b From 1d731bb77247b9456680f379c31c30f9774473ce Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:13 -0700 Subject: FRV: tlbflush: move asmlinkage before return type Make the use of asmlinkage like the rest of the kernel. Link: http://lkml.kernel.org/r/efb2dfed4d9315bf68ec0334c81b65af176a0174.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/frv/include/asm/tlbflush.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/frv/include/asm/tlbflush.h b/arch/frv/include/asm/tlbflush.h index 7ac5eafc5d98..75879420f578 100644 --- a/arch/frv/include/asm/tlbflush.h +++ b/arch/frv/include/asm/tlbflush.h @@ -18,10 +18,10 @@ #ifdef CONFIG_MMU #ifndef __ASSEMBLY__ -extern void asmlinkage __flush_tlb_all(void); -extern void asmlinkage __flush_tlb_mm(unsigned long contextid); -extern void asmlinkage __flush_tlb_page(unsigned long contextid, unsigned long start); -extern void asmlinkage __flush_tlb_range(unsigned long contextid, +extern asmlinkage void __flush_tlb_all(void); +extern asmlinkage void __flush_tlb_mm(unsigned long contextid); +extern asmlinkage void __flush_tlb_page(unsigned long contextid, unsigned long start); +extern asmlinkage void __flush_tlb_range(unsigned long contextid, unsigned long start, unsigned long end); #endif /* !__ASSEMBLY__ */ -- cgit v1.2.3-59-g8ed1b From d778931d7b32ada39de0a45267b0c4af0600e277 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:16 -0700 Subject: ia64: move inline before return type Make the use of inline like the rest of the kernel. 
Link: http://lkml.kernel.org/r/d47074493af80ce12590340294bc49618165c30d.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Tony Luck Cc: Fenghua Yu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/kernel/mca.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 79c7c46d7dc1..555b11180156 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -334,7 +334,7 @@ static void ia64_mlogbuf_dump_from_init(void) ia64_mlogbuf_dump(); } -static void inline +static inline void ia64_mca_spin(const char *func) { if (monarch_cpu == smp_processor_id()) -- cgit v1.2.3-59-g8ed1b From c02f2a911f2bb833f982d869e3a4ae67e1468969 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:19 -0700 Subject: ia64: sn: pci: move inline before type Make the use of inline like the rest of the kernel. Link: http://lkml.kernel.org/r/f42b2202bd0d4e7ccf79ce5348bb255a035e67bb.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Tony Luck Cc: Fenghua Yu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/sn/pci/pcibr/pcibr_ate.c | 2 +- arch/ia64/sn/pci/tioce_provider.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index 5bc34eac9e01..b67bb4cb73ff 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c @@ -140,7 +140,7 @@ static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info, /* * Update the ate. */ -void inline +inline void ate_write(struct pcibus_info *pcibus_info, int ate_index, int count, volatile u64 ate) { diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 46d3df4b03a1..3bd9abc35485 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -52,7 +52,7 @@ * All registers defined in struct tioce will meet that criteria. */ -static void inline +static inline void tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr) { u64 mmr_base; @@ -78,7 +78,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr) } } -static void inline +static inline void tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr) { u64 mmr_base; -- cgit v1.2.3-59-g8ed1b From 9d8a9ae2818fa44099195146a7fa85963409a2e9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:22 -0700 Subject: m68k: coldfire: move inline before return type Make the code like the rest of the kernel. 
Link: http://lkml.kernel.org/r/14db9c166d5b68efa77e337cfe49bb9b29bca3f7.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Acked-by: Greg Ungerer Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m68k/coldfire/intc-simr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/m68k/coldfire/intc-simr.c b/arch/m68k/coldfire/intc-simr.c index 7cf2c156f72d..15c4b7a6e38f 100644 --- a/arch/m68k/coldfire/intc-simr.c +++ b/arch/m68k/coldfire/intc-simr.c @@ -35,7 +35,7 @@ #define EINT7 67 /* EDGE Port interrupt 7 */ static unsigned int irqebitmap[] = { 0, 1, 4, 7 }; -static unsigned int inline irq2ebit(unsigned int irq) +static inline unsigned int irq2ebit(unsigned int irq) { return irqebitmap[irq - EINT0]; } @@ -51,7 +51,7 @@ static unsigned int inline irq2ebit(unsigned int irq) #define EINT1 65 /* EDGE Port interrupt 1 */ #define EINT7 71 /* EDGE Port interrupt 7 */ -static unsigned int inline irq2ebit(unsigned int irq) +static inline unsigned int irq2ebit(unsigned int irq) { return irq - EINT0; } -- cgit v1.2.3-59-g8ed1b From b745fcb949f90762e7e126737683e2a26a4e8035 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:25 -0700 Subject: MIPS: SMP: move asmlinkage before return type Make the code like the rest of the kernel. Link: http://lkml.kernel.org/r/756d3fb543e981b9284e756fa27616725a354b28.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Ralf Baechle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/mips/include/asm/smp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 98a117a05fbc..bab3d41e5987 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -47,7 +47,7 @@ extern int __cpu_logical_map[NR_CPUS]; /* Mask of CPUs which are currently definitely operating coherently */ extern cpumask_t cpu_coherent_mask; -extern void asmlinkage smp_bootstrap(void); +extern asmlinkage void smp_bootstrap(void); extern void calculate_cpu_foreign_map(void); -- cgit v1.2.3-59-g8ed1b From 0cef25c1d89fb617683fc76645bb27ab297f2a1e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:28 -0700 Subject: sh: move inline before return type Make the code like the rest of the kernel. Link: http://lkml.kernel.org/r/f81bb2a67a97b1fd8b6ea99bd350d8a0f6864fb1.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Yoshinori Sato Cc: Rich Felker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/mm/cache-sh5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index d94dadedf74f..445b5e69b73c 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c @@ -234,7 +234,7 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon #define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4)) static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, }; -static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) +static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) { /* Purge all ways in a particular block of sets, specified by the base set number and number of sets. 
Can handle wrap-around, if that's -- cgit v1.2.3-59-g8ed1b From 0825f49f225bd24c3ae46ff2bb716067e3684fc3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:31 -0700 Subject: x86/efi: move asmlinkage before return type Make the code like the rest of the kernel. Link: http://lkml.kernel.org/r/1cd3d401626e51ea0e2333a860e76e80bc560a4c.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Matt Fleming Cc: Ard Biesheuvel Cc: Thomas Gleixner Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/efi.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index d2ff779f347e..796ff6c1aa53 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -33,7 +33,7 @@ #ifdef CONFIG_X86_32 -extern unsigned long asmlinkage efi_call_phys(void *, ...); +extern asmlinkage unsigned long efi_call_phys(void *, ...); #define arch_efi_call_virt_setup() kernel_fpu_begin() #define arch_efi_call_virt_teardown() kernel_fpu_end() @@ -52,7 +52,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); #define EFI_LOADER_SIGNATURE "EL64" -extern u64 asmlinkage efi_call(void *fp, ...); +extern asmlinkage u64 efi_call(void *fp, ...); #define efi_call_phys(f, args...) efi_call((f), args) -- cgit v1.2.3-59-g8ed1b From e0710e510c59602e4fa80d784c946e02e8968523 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:34 -0700 Subject: drivers: s390: move static and inline before return type Make the code like the rest of the kernel. Link: http://lkml.kernel.org/r/3f980cd89084ae09716353aba3171e4b3815e690.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Julian Wiedmann Cc: Ursula Braun Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/net/ctcm_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 1563b1458e44..2ade6131a89f 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1115,7 +1115,7 @@ static const struct net_device_ops ctcm_mpc_netdev_ops = { .ndo_start_xmit = ctcmpc_tx, }; -void static ctcm_dev_setup(struct net_device *dev) +static void ctcm_dev_setup(struct net_device *dev) { dev->type = ARPHRD_SLIP; dev->tx_queue_len = 100; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 3062cde33a3d..8975cd321390 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2408,7 +2408,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return rc; } -int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) +inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; struct neighbour *n = NULL; -- cgit v1.2.3-59-g8ed1b From a9e5bfdb9d786efbc6995edfe6902788527457ad Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:37 -0700 Subject: drivers: tty: serial: move inline before return type Make the code like the rest of the kernel. 
Link: http://lkml.kernel.org/r/55d3e89d50bb03d603bfb28019fab07f48bdc714.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Pat Gefre Cc: Greg Kroah-Hartman Cc: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/tty/serial/ioc3_serial.c | 4 ++-- drivers/tty/serial/ioc4_serial.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c index 2a61dd6b4009..906ee770ff4a 100644 --- a/drivers/tty/serial/ioc3_serial.c +++ b/drivers/tty/serial/ioc3_serial.c @@ -377,7 +377,7 @@ static struct ioc3_port *get_ioc3_port(struct uart_port *the_port) * called per port from attach... * @port: port to initialize */ -static int inline port_init(struct ioc3_port *port) +static inline int port_init(struct ioc3_port *port) { uint32_t sio_cr; struct port_hooks *hooks = port->ip_hooks; @@ -1430,7 +1430,7 @@ static int receive_chars(struct uart_port *the_port) * @pending: interrupts to handle */ -static int inline +static inline int ioc3uart_intr_one(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int pending) diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c index f96bcf9bee25..43d7d32eb150 100644 --- a/drivers/tty/serial/ioc4_serial.c +++ b/drivers/tty/serial/ioc4_serial.c @@ -824,7 +824,7 @@ pending_intrs(struct ioc4_soft *soft, int type) * called per port from attach... * @port: port to initialize */ -static int inline port_init(struct ioc4_port *port) +static inline int port_init(struct ioc4_port *port) { uint32_t sio_cr; struct hooks *hooks = port->ip_hooks; @@ -1048,7 +1048,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg) * IOC4 with serial ports in the system. * @idd: Master module data for this IOC4 */ -static int inline ioc4_attach_local(struct ioc4_driver_data *idd) +static inline int ioc4_attach_local(struct ioc4_driver_data *idd) { struct ioc4_port *port; struct ioc4_port *ports[IOC4_NUM_SERIAL_PORTS]; -- cgit v1.2.3-59-g8ed1b From 4abf87f41a6cedf7a780af63991d6bebec303be0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:40 -0700 Subject: USB: serial: safe_serial: move __inline__ before return type Make the code like the rest of the kernel. Also use inline instead of __inline__. Link: http://lkml.kernel.org/r/a5072b74b6c293e6ec93c4900482e9d3267f15b2.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Johan Hovold Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/usb/serial/safe_serial.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index 8a069aa154ed..27d7a7016298 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c @@ -180,7 +180,7 @@ static const __u16 crc10_table[256] = { * Perform a memcpy and calculate fcs using ppp 10bit CRC algorithm. Return * new 10 bit FCS. */ -static __u16 __inline__ fcs_compute10(unsigned char *sp, int len, __u16 fcs) +static inline __u16 fcs_compute10(unsigned char *sp, int len, __u16 fcs) { for (; len-- > 0; fcs = CRC10_FCS(fcs, *sp++)); return fcs; -- cgit v1.2.3-59-g8ed1b From dce3944717523bfe65e2b3cbd1d3af0ccc27ace9 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:43 -0700 Subject: video: fbdev: intelfb: move inline before return type Make the code like the rest of the kernel. But there is an oddity here because the inline should probably be removed. 
It's an extern function in intelfb.h and it is used in intelfbdrv.c and intelfbhw.c. The inline is kept here as I suppose it's possible for some compiler to inline the uses in intelfbdrv.c and also create an external function for intelfbhw.c (see the sketch below). Link: http://lkml.kernel.org/r/8ba151a1fdc84e42cbf4aafc798513c0158edee1.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Maik Broemme Cc: Bartlomiej Zolnierkiewicz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/fbdev/intelfb/intelfbdrv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index 6b444400a86c..ffc391208b27 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -907,7 +907,7 @@ static void intelfb_pci_unregister(struct pci_dev *pdev) * helper functions * ***************************************************************/ -int __inline__ intelfb_var_to_depth(const struct fb_var_screeninfo *var) +__inline__ int intelfb_var_to_depth(const struct fb_var_screeninfo *var) { DBG_MSG("intelfb_var_to_depth: bpp: %d, green.length is %d\n", var->bits_per_pixel, var->green.length); -- cgit v1.2.3-59-g8ed1b From ba168a46b0b9e06f38f63ef9adf182950c82d6e0 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:46 -0700 Subject: video: fbdev: omap: move inline before return type Make the code like the rest of the kernel. Link: http://lkml.kernel.org/r/bc5927726abc70d7c066df7ab4cb7cfce4a7b577.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Cc: Tomi Valkeinen Cc: Bartlomiej Zolnierkiewicz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/fbdev/omap/lcdc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c index e3d9b9ea5498..938cba0d24ae 100644 --- a/drivers/video/fbdev/omap/lcdc.c +++ b/drivers/video/fbdev/omap/lcdc.c @@ -79,12 +79,12 @@ static struct omap_lcd_controller { unsigned long vram_size; } lcdc; -static void inline enable_irqs(int mask) +static inline void enable_irqs(int mask) { lcdc.irq_mask |= mask; } -static void inline disable_irqs(int mask) +static inline void disable_irqs(int mask) { lcdc.irq_mask &= ~mask; } @@ -466,7 +466,7 @@ static void calc_ck_div(int is_tft, int pck, int *pck_div) } } -static void inline setup_regs(void) +static inline void setup_regs(void) { u32 l; struct lcd_panel *panel = lcdc.fbdev->panel; -- cgit v1.2.3-59-g8ed1b From c945dccc80856107f109c36a7d0e29a371b5d1b5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jul 2017 14:37:48 -0700 Subject: ARM: samsung: usb-ohci: move inline before return type Make the code like the rest of the kernel.
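Returning to the intelfb patch above, the extern-inline arrangement it describes looks like this one-file sketch; the names are hypothetical, and it assumes the gnu89 inline semantics the kernel build used at the time:
-------------------------------------------------------
/* intelfb.h analogue: a plain extern declaration, no inline. */
int depth_of(int bits_per_pixel);

/* intelfbdrv.c analogue: the definition carries inline (spelled
 * __inline__ in the driver). Under gnu89 semantics this is both
 * inlinable at local call sites and compiled to a standalone,
 * externally visible function - the "external function for
 * intelfbhw" the changelog supposes. */
inline int depth_of(int bits_per_pixel)
{
	return bits_per_pixel == 16 ? 16 : 24;	/* made-up mapping */
}

/* intelfbhw.c analogue: a separate file sees only the extern
 * declaration and links against that standalone definition.
 * Under strict C99 inline rules the behaviour differs, which is
 * why simply dropping the inline would be the cleaner fix. */
int use_depth(void)
{
	return depth_of(16);
}
-------------------------------------------------------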
Link: http://lkml.kernel.org/r/667a515b8d0f10f2465d519f8595edd91552fc5e.1499284835.git.joe@perches.com Signed-off-by: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/platform_data/usb-ohci-s3c2410.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/platform_data/usb-ohci-s3c2410.h b/include/linux/platform_data/usb-ohci-s3c2410.h index 7fa1fbefc3f2..cc7554ae6e8b 100644 --- a/include/linux/platform_data/usb-ohci-s3c2410.h +++ b/include/linux/platform_data/usb-ohci-s3c2410.h @@ -31,7 +31,7 @@ struct s3c2410_hcd_info { void (*report_oc)(struct s3c2410_hcd_info *, int ports); }; -static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) +static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports) { if (info->report_oc != NULL) { (info->report_oc)(info, ports); -- cgit v1.2.3-59-g8ed1b From 3e8f399da490e6ac20a3cfd6aa404c9aa961a9a2 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 12 Jul 2017 14:37:51 -0700 Subject: writeback: rework wb_[dec|inc]_stat family of functions Currently the writeback statistics code uses percpu counters to hold various statistics. Furthermore, we have two families of functions - those which disable local irqs, and those which don't, whose names begin with a double underscore. However, they both end up calling __add_wb_stat, which in turn calls percpu_counter_add_batch, which is already irq-safe. Exploiting this fact allows us to eliminate the __wb_* functions, since they add no protection beyond what we already have. Furthermore, refactor the wb_* functions to call __add_wb_stat directly, without the irq-disabling dance. This will likely result in better runtime for code which modifies the stat counters. While at it, also document why percpu_counter_add_batch is in fact preempt- and irq-safe, since at least 3 people got confused.
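Reduced to a self-contained sketch, the shape of the change is below; the writeback machinery is stubbed out with a C11 atomic, and the real kernel names appear only in the comments:
-------------------------------------------------------
#include <stdatomic.h>

static _Atomic long stat;	/* stand-in for wb->stat[item] */

/* Stand-in for __add_wb_stat(): in the kernel it ends up in
 * percpu_counter_add_batch(), which is already irq-safe, so no
 * caller has to mask interrupts around it. */
static void add_stat(long amount)
{
	atomic_fetch_add(&stat, amount);
}

/* Before the patch there were two families: __inc_wb_stat()
 * (no irq handling, the caller's problem) and inc_wb_stat()
 * (the former wrapped in local_irq_save()/local_irq_restore()).
 * Because the inner primitive is irq-safe, the outer masking
 * buys nothing, and both collapse into plain wrappers: */
static void inc_stat(void) { add_stat(1); }
static void dec_stat(void) { add_stat(-1); }

int main(void)
{
	inc_stat();
	dec_stat();
	return (int)atomic_load(&stat);	/* 0 */
}
-------------------------------------------------------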
Link: http://lkml.kernel.org/r/1498029937-27293-1-git-send-email-nborisov@suse.com Signed-off-by: Nikolay Borisov Acked-by: Tejun Heo Reviewed-by: Jan Kara Cc: Josef Bacik Cc: Mel Gorman Cc: Jeff Layton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 8 ++++---- include/linux/backing-dev.h | 24 ++---------------------- lib/percpu_counter.c | 7 +++++++ mm/page-writeback.c | 10 +++++----- 4 files changed, 18 insertions(+), 31 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8b426f83909f..245c430a2e41 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -380,8 +380,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) struct page *page = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); if (likely(page) && PageDirty(page)) { - __dec_wb_stat(old_wb, WB_RECLAIMABLE); - __inc_wb_stat(new_wb, WB_RECLAIMABLE); + dec_wb_stat(old_wb, WB_RECLAIMABLE); + inc_wb_stat(new_wb, WB_RECLAIMABLE); } } @@ -391,8 +391,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) &mapping->tree_lock); if (likely(page)) { WARN_ON_ONCE(!PageWriteback(page)); - __dec_wb_stat(old_wb, WB_WRITEBACK); - __inc_wb_stat(new_wb, WB_WRITEBACK); + dec_wb_stat(old_wb, WB_WRITEBACK); + inc_wb_stat(new_wb, WB_WRITEBACK); } } diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 334165c911f0..854e1bdd0b2a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -69,34 +69,14 @@ static inline void __add_wb_stat(struct bdi_writeback *wb, percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); } -static inline void __inc_wb_stat(struct bdi_writeback *wb, - enum wb_stat_item item) -{ - __add_wb_stat(wb, item, 1); -} - static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { - unsigned long flags; - - local_irq_save(flags); - __inc_wb_stat(wb, item); - local_irq_restore(flags); -} - -static inline void __dec_wb_stat(struct bdi_writeback *wb, - enum wb_stat_item item) -{ - __add_wb_stat(wb, item, -1); + __add_wb_stat(wb, item, 1); } static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { - unsigned long flags; - - local_irq_save(flags); - __dec_wb_stat(wb, item); - local_irq_restore(flags); + __add_wb_stat(wb, item, -1); } static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 8ee7e5ec21be..3bf4a9984f4c 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -72,6 +72,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) } EXPORT_SYMBOL(percpu_counter_set); +/** + * This function is both preempt and irq safe. The former is due to explicit + * preemption disable. The latter is guaranteed by the fact that the slow path + * is explicitly protected by an irq-safe spinlock whereas the fast path uses + * this_cpu_add which is irq-safe by definition.
Hence there is no need to muck + * with irq state before calling this one. + */ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { s64 count; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0b60cc7ddac2..96e93b214d31 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -601,7 +601,7 @@ static inline void __wb_writeout_inc(struct bdi_writeback *wb) { struct wb_domain *cgdom; - __inc_wb_stat(wb, WB_WRITTEN); + inc_wb_stat(wb, WB_WRITTEN); wb_domain_writeout_inc(&global_wb_domain, &wb->completions, wb->bdi->max_prop_frac); @@ -2435,8 +2435,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) __inc_lruvec_page_state(page, NR_FILE_DIRTY); __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING); __inc_node_page_state(page, NR_DIRTIED); - __inc_wb_stat(wb, WB_RECLAIMABLE); - __inc_wb_stat(wb, WB_DIRTIED); + inc_wb_stat(wb, WB_RECLAIMABLE); + inc_wb_stat(wb, WB_DIRTIED); task_io_account_write(PAGE_SIZE); current->nr_dirtied++; this_cpu_inc(bdp_ratelimits); @@ -2741,7 +2741,7 @@ int test_clear_page_writeback(struct page *page) if (bdi_cap_account_writeback(bdi)) { struct bdi_writeback *wb = inode_to_wb(inode); - __dec_wb_stat(wb, WB_WRITEBACK); + dec_wb_stat(wb, WB_WRITEBACK); __wb_writeout_inc(wb); } } @@ -2786,7 +2786,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write) page_index(page), PAGECACHE_TAG_WRITEBACK); if (bdi_cap_account_writeback(bdi)) - __inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK); + inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK); /* * We can come through here when swapping anonymous -- cgit v1.2.3-59-g8ed1b
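To ground the new lib/percpu_counter.c comment in code, here is an annotated reconstruction of percpu_counter_add_batch() as it stood around this patch - a sketch for illustration, not a verbatim copy; the file itself is authoritative:
-------------------------------------------------------
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch)
{
	s64 count;

	preempt_disable();	/* preempt safety: no CPU migration */
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		/* Slow path: fold the per-cpu delta into fbc->count
		 * under an irq-safe spinlock - the "explicitly
		 * protected" case in the comment above. */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* Fast path: this_cpu_add() is irq-safe by definition
		 * (a single instruction on x86, irqs masked internally
		 * on other architectures), so callers such as
		 * inc_wb_stat() need no irq dance of their own. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
-------------------------------------------------------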