From f303fccb82928790ec58eea82722bd5c54d300b3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 9 Feb 2016 17:59:38 -0500 Subject: workqueue: implement "workqueue.debug_force_rr_cpu" debug feature Workqueue used to guarantee local execution for work items queued without explicit target CPU. The guarantee is gone now which can break some usages in subtle ways. To flush out those cases, this patch implements a debug feature which forces round-robin CPU selection for all such work items. The debug feature defaults to off and can be enabled with a kernel parameter. The default can be flipped with a debug config option. If you hit this commit during bisection, please refer to 041bd12e272c ("Revert "workqueue: make sure delayed work run in local cpu"") for more information and ping me. Signed-off-by: Tejun Heo --- lib/Kconfig.debug | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'lib/Kconfig.debug') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ecb9e75614bf..8bfd1aca7a3d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1400,6 +1400,21 @@ config RCU_EQS_DEBUG endmenu # "RCU Debugging" +config DEBUG_WQ_FORCE_RR_CPU + bool "Force round-robin CPU selection for unbound work items" + depends on DEBUG_KERNEL + default n + help + Workqueue used to implicitly guarantee that work items queued + without explicit CPU specified are put on the local CPU. This + guarantee is no longer true and while local CPU is still + preferred work items may be put on foreign CPUs. Kernel + parameter "workqueue.debug_force_rr_cpu" is added to force + round-robin CPU selection to flush out usages which depend on the + now broken guarantee. This config option enables the debug + feature by default. When enabled, memory and cache locality will + be impacted. + config DEBUG_BLOCK_EXT_DEVT bool "Force extended block device numbers and spread them" depends on DEBUG_KERNEL -- cgit v1.3-14-g43fede From 5fd003f56c2c584b62a0486ad25bbd4be02b8b6c Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Fri, 19 Feb 2016 09:24:00 -0500 Subject: test_bitmap: unit tests for lib/bitmap.c This is mainly testing bitmap construction and conversion to/from u32[] for now. Tested: qemu i386, x86_64, ppc, ppc64 BE and LE, ARM. Signed-off-by: David Decotigny Signed-off-by: David S. Miller --- lib/Kconfig.debug | 8 + lib/Makefile | 1 + lib/test_bitmap.c | 358 ++++++++++++++++++++++++++++++++++ tools/testing/selftests/lib/Makefile | 2 +- tools/testing/selftests/lib/bitmap.sh | 10 + 5 files changed, 378 insertions(+), 1 deletion(-) create mode 100644 lib/test_bitmap.c create mode 100755 tools/testing/selftests/lib/bitmap.sh (limited to 'lib/Kconfig.debug') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ecb9e75614bf..f890ee5e1385 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1738,6 +1738,14 @@ config TEST_KSTRTOX config TEST_PRINTF tristate "Test printf() family of functions at runtime" +config TEST_BITMAP + tristate "Test bitmap_*() family of functions at runtime" + default n + help + Enable this option to test the bitmap functions at boot. + + If unsure, say N. 
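As a concrete illustration of what the workqueue change above means by work items queued without an explicit target CPU (a minimal sketch, not part of any patch in this series; the module, work items and names are invented):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static void demo_work_fn(struct work_struct *work)
{
	pr_info("work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_any_cpu_work, demo_work_fn);
static DECLARE_WORK(demo_pinned_work, demo_work_fn);

static int __init wq_demo_init(void)
{
	/*
	 * No CPU given: the old implicit "runs on the queueing CPU"
	 * behaviour is now only a preference.  Booting with
	 * workqueue.debug_force_rr_cpu=1 (or enabling DEBUG_WQ_FORCE_RR_CPU)
	 * spreads such items round-robin to flush out code that still
	 * assumes local execution.
	 */
	schedule_work(&demo_any_cpu_work);

	/* An explicitly specified CPU is still honoured regardless. */
	schedule_work_on(0, &demo_pinned_work);

	return 0;
}
module_init(wq_demo_init);

MODULE_LICENSE("GPL");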
+ config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" default n diff --git a/lib/Makefile b/lib/Makefile index a7c26a41a738..dda4039588b1 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o +obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c new file mode 100644 index 000000000000..e2cbd43d193c --- /dev/null +++ b/lib/test_bitmap.c @@ -0,0 +1,358 @@ +/* + * Test cases for printf facility. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include + +static unsigned total_tests __initdata; +static unsigned failed_tests __initdata; + +static char pbl_buffer[PAGE_SIZE] __initdata; + + +static bool __init +__check_eq_uint(const char *srcfile, unsigned int line, + const unsigned int exp_uint, unsigned int x) +{ + if (exp_uint != x) { + pr_warn("[%s:%u] expected %u, got %u\n", + srcfile, line, exp_uint, x); + return false; + } + return true; +} + + +static bool __init +__check_eq_bitmap(const char *srcfile, unsigned int line, + const unsigned long *exp_bmap, unsigned int exp_nbits, + const unsigned long *bmap, unsigned int nbits) +{ + if (exp_nbits != nbits) { + pr_warn("[%s:%u] bitmap length mismatch: expected %u, got %u\n", + srcfile, line, exp_nbits, nbits); + return false; + } + + if (!bitmap_equal(exp_bmap, bmap, nbits)) { + pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n", + srcfile, line, + exp_nbits, exp_bmap, nbits, bmap); + return false; + } + return true; +} + +static bool __init +__check_eq_pbl(const char *srcfile, unsigned int line, + const char *expected_pbl, + const unsigned long *bitmap, unsigned int nbits) +{ + snprintf(pbl_buffer, sizeof(pbl_buffer), "%*pbl", nbits, bitmap); + if (strcmp(expected_pbl, pbl_buffer)) { + pr_warn("[%s:%u] expected \"%s\", got \"%s\"\n", + srcfile, line, + expected_pbl, pbl_buffer); + return false; + } + return true; +} + +static bool __init +__check_eq_u32_array(const char *srcfile, unsigned int line, + const u32 *exp_arr, unsigned int exp_len, + const u32 *arr, unsigned int len) +{ + if (exp_len != len) { + pr_warn("[%s:%u] array length differ: expected %u, got %u\n", + srcfile, line, + exp_len, len); + return false; + } + + if (memcmp(exp_arr, arr, len*sizeof(*arr))) { + pr_warn("[%s:%u] array contents differ\n", srcfile, line); + print_hex_dump(KERN_WARNING, " exp: ", DUMP_PREFIX_OFFSET, + 32, 4, exp_arr, exp_len*sizeof(*exp_arr), false); + print_hex_dump(KERN_WARNING, " got: ", DUMP_PREFIX_OFFSET, + 32, 4, arr, len*sizeof(*arr), false); + return false; + } + + return true; +} + +#define __expect_eq(suffix, ...) \ + ({ \ + int result = 0; \ + total_tests++; \ + if (!__check_eq_ ## suffix(__FILE__, __LINE__, \ + ##__VA_ARGS__)) { \ + failed_tests++; \ + result = 1; \ + } \ + result; \ + }) + +#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__) +#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__) +#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) +#define expect_eq_u32_array(...) 
__expect_eq(u32_array, ##__VA_ARGS__) + +static void __init test_zero_fill_copy(void) +{ + DECLARE_BITMAP(bmap1, 1024); + DECLARE_BITMAP(bmap2, 1024); + + bitmap_zero(bmap1, 1024); + bitmap_zero(bmap2, 1024); + + /* single-word bitmaps */ + expect_eq_pbl("", bmap1, 23); + + bitmap_fill(bmap1, 19); + expect_eq_pbl("0-18", bmap1, 1024); + + bitmap_copy(bmap2, bmap1, 23); + expect_eq_pbl("0-18", bmap2, 1024); + + bitmap_fill(bmap2, 23); + expect_eq_pbl("0-22", bmap2, 1024); + + bitmap_copy(bmap2, bmap1, 23); + expect_eq_pbl("0-18", bmap2, 1024); + + bitmap_zero(bmap1, 23); + expect_eq_pbl("", bmap1, 1024); + + /* multi-word bitmaps */ + bitmap_zero(bmap1, 1024); + expect_eq_pbl("", bmap1, 1024); + + bitmap_fill(bmap1, 109); + expect_eq_pbl("0-108", bmap1, 1024); + + bitmap_copy(bmap2, bmap1, 1024); + expect_eq_pbl("0-108", bmap2, 1024); + + bitmap_fill(bmap2, 1024); + expect_eq_pbl("0-1023", bmap2, 1024); + + bitmap_copy(bmap2, bmap1, 1024); + expect_eq_pbl("0-108", bmap2, 1024); + + /* the following tests assume a 32- or 64-bit arch (even 128b + * if we care) + */ + + bitmap_fill(bmap2, 1024); + bitmap_copy(bmap2, bmap1, 109); /* ... but 0-padded til word length */ + expect_eq_pbl("0-108,128-1023", bmap2, 1024); + + bitmap_fill(bmap2, 1024); + bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */ + expect_eq_pbl("0-108,128-1023", bmap2, 1024); + + bitmap_zero(bmap2, 97); /* ... but 0-padded til word length */ + expect_eq_pbl("128-1023", bmap2, 1024); +} + +static void __init test_bitmap_u32_array_conversions(void) +{ + DECLARE_BITMAP(bmap1, 1024); + DECLARE_BITMAP(bmap2, 1024); + u32 exp_arr[32], arr[32]; + unsigned nbits; + + for (nbits = 0 ; nbits < 257 ; ++nbits) { + const unsigned int used_u32s = DIV_ROUND_UP(nbits, 32); + unsigned int i, rv; + + bitmap_zero(bmap1, nbits); + bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ + + memset(arr, 0xff, sizeof(arr)); + rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); + expect_eq_uint(nbits, rv); + + memset(exp_arr, 0xff, sizeof(exp_arr)); + memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + + bitmap_fill(bmap2, 1024); + rv = bitmap_from_u32array(bmap2, nbits, arr, used_u32s); + expect_eq_uint(nbits, rv); + expect_eq_bitmap(bmap1, 1024, bmap2, 1024); + + for (i = 0 ; i < nbits ; ++i) { + /* + * test conversion bitmap -> u32[] + */ + + bitmap_zero(bmap1, 1024); + __set_bit(i, bmap1); + bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ + + memset(arr, 0xff, sizeof(arr)); + rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); + expect_eq_uint(nbits, rv); + + /* 1st used u32 words contain expected bit set, the + * remaining words are left unchanged (0xff) + */ + memset(exp_arr, 0xff, sizeof(exp_arr)); + memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); + exp_arr[i/32] = (1U<<(i%32)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + + + /* same, with longer array to fill + */ + memset(arr, 0xff, sizeof(arr)); + rv = bitmap_to_u32array(arr, 32, bmap1, nbits); + expect_eq_uint(nbits, rv); + + /* 1st used u32 words contain expected bit set, the + * remaining words are all 0s + */ + memset(exp_arr, 0, sizeof(exp_arr)); + exp_arr[i/32] = (1U<<(i%32)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + + /* + * test conversion u32[] -> bitmap + */ + + /* the 1st nbits of bmap2 are identical to + * bmap1, the remaining bits of bmap2 are left + * unchanged (all 1s) + */ + bitmap_fill(bmap2, 1024); + rv = bitmap_from_u32array(bmap2, nbits, + exp_arr, used_u32s); + expect_eq_uint(nbits, rv); + + 
expect_eq_bitmap(bmap1, 1024, bmap2, 1024); + + /* same, with more bits to fill + */ + memset(arr, 0xff, sizeof(arr)); /* garbage */ + memset(arr, 0, used_u32s*sizeof(u32)); + arr[i/32] = (1U<<(i%32)); + + bitmap_fill(bmap2, 1024); + rv = bitmap_from_u32array(bmap2, 1024, arr, used_u32s); + expect_eq_uint(used_u32s*32, rv); + + /* the 1st nbits of bmap2 are identical to + * bmap1, the remaining bits of bmap2 are cleared + */ + bitmap_zero(bmap1, 1024); + __set_bit(i, bmap1); + expect_eq_bitmap(bmap1, 1024, bmap2, 1024); + + + /* + * test short conversion bitmap -> u32[] (1 + * word too short) + */ + if (used_u32s > 1) { + bitmap_zero(bmap1, 1024); + __set_bit(i, bmap1); + bitmap_set(bmap1, nbits, + 1024 - nbits); /* garbage */ + memset(arr, 0xff, sizeof(arr)); + + rv = bitmap_to_u32array(arr, used_u32s - 1, + bmap1, nbits); + expect_eq_uint((used_u32s - 1)*32, rv); + + /* 1st used u32 words contain expected + * bit set, the remaining words are + * left unchanged (0xff) + */ + memset(exp_arr, 0xff, sizeof(exp_arr)); + memset(exp_arr, 0, + (used_u32s-1)*sizeof(*exp_arr)); + if ((i/32) < (used_u32s - 1)) + exp_arr[i/32] = (1U<<(i%32)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + } + + /* + * test short conversion u32[] -> bitmap (3 + * bits too short) + */ + if (nbits > 3) { + memset(arr, 0xff, sizeof(arr)); /* garbage */ + memset(arr, 0, used_u32s*sizeof(*arr)); + arr[i/32] = (1U<<(i%32)); + + bitmap_zero(bmap1, 1024); + rv = bitmap_from_u32array(bmap1, nbits - 3, + arr, used_u32s); + expect_eq_uint(nbits - 3, rv); + + /* we are expecting the bit < nbits - + * 3 (none otherwise), and the rest of + * bmap1 unchanged (0-filled) + */ + bitmap_zero(bmap2, 1024); + if (i < nbits - 3) + __set_bit(i, bmap2); + expect_eq_bitmap(bmap2, 1024, bmap1, 1024); + + /* do the same with bmap1 initially + * 1-filled + */ + + bitmap_fill(bmap1, 1024); + rv = bitmap_from_u32array(bmap1, nbits - 3, + arr, used_u32s); + expect_eq_uint(nbits - 3, rv); + + /* we are expecting the bit < nbits - + * 3 (none otherwise), and the rest of + * bmap1 unchanged (1-filled) + */ + bitmap_zero(bmap2, 1024); + if (i < nbits - 3) + __set_bit(i, bmap2); + bitmap_set(bmap2, nbits-3, 1024 - nbits + 3); + expect_eq_bitmap(bmap2, 1024, bmap1, 1024); + } + } + } +} + +static int __init test_bitmap_init(void) +{ + test_zero_fill_copy(); + test_bitmap_u32_array_conversions(); + + if (failed_tests == 0) + pr_info("all %u tests passed\n", total_tests); + else + pr_warn("failed %u out of %u tests\n", + failed_tests, total_tests); + + return failed_tests ? 
-EINVAL : 0; +} + +static void __exit test_bitmap_cleanup(void) +{ +} + +module_init(test_bitmap_init); +module_exit(test_bitmap_cleanup); + +MODULE_AUTHOR("david decotigny "); +MODULE_LICENSE("GPL"); diff --git a/tools/testing/selftests/lib/Makefile b/tools/testing/selftests/lib/Makefile index 47147b968514..08360060ab14 100644 --- a/tools/testing/selftests/lib/Makefile +++ b/tools/testing/selftests/lib/Makefile @@ -3,6 +3,6 @@ # No binaries, but make sure arg-less "make" doesn't trigger "run_tests" all: -TEST_PROGS := printf.sh +TEST_PROGS := printf.sh bitmap.sh include ../lib.mk diff --git a/tools/testing/selftests/lib/bitmap.sh b/tools/testing/selftests/lib/bitmap.sh new file mode 100755 index 000000000000..2da187b6ddad --- /dev/null +++ b/tools/testing/selftests/lib/bitmap.sh @@ -0,0 +1,10 @@ +#!/bin/sh +# Runs bitmap infrastructure tests using test_bitmap kernel module + +if /sbin/modprobe -q test_bitmap; then + /sbin/modprobe -q -r test_bitmap + echo "bitmap: ok" +else + echo "bitmap: [FAIL]" + exit 1 +fi -- cgit v1.3-14-g43fede From b9ab5ebb14ec389bd80f66613f1fe3f8f65f2521 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Sun, 28 Feb 2016 22:22:42 -0600 Subject: objtool: Add CONFIG_STACK_VALIDATION option Add a CONFIG_STACK_VALIDATION option which will run "objtool check" for each .o file to ensure the validity of its stack metadata. Signed-off-by: Josh Poimboeuf Cc: Andrew Morton Cc: Andy Lutomirski Cc: Arnaldo Carvalho de Melo Cc: Bernd Petrovitsch Cc: Borislav Petkov Cc: Chris J Arges Cc: Jiri Slaby Cc: Linus Torvalds Cc: Michal Marek Cc: Namhyung Kim Cc: Pedro Alves Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: live-patching@vger.kernel.org Link: http://lkml.kernel.org/r/92baab69a6bf9bc7043af0bfca9fb964a1d45546.1456719558.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar --- Makefile | 5 ++++- arch/Kconfig | 6 ++++++ lib/Kconfig.debug | 12 ++++++++++++ scripts/Makefile.build | 39 +++++++++++++++++++++++++++++++++++---- 4 files changed, 57 insertions(+), 5 deletions(-) (limited to 'lib/Kconfig.debug') diff --git a/Makefile b/Makefile index fbe1b921798f..62be03b2add4 100644 --- a/Makefile +++ b/Makefile @@ -993,7 +993,10 @@ prepare0: archprepare FORCE $(Q)$(MAKE) $(build)=. # All the preparing.. -prepare: prepare0 +prepare: prepare0 prepare-objtool + +PHONY += prepare-objtool +prepare-objtool: $(if $(CONFIG_STACK_VALIDATION), tools/objtool FORCE) # Generate some files # --------------------------------------------------------------------------- diff --git a/arch/Kconfig b/arch/Kconfig index f6b649d88ec8..81869a5e7e17 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -583,6 +583,12 @@ config HAVE_COPY_THREAD_TLS normal C parameter passing, rather than extracting the syscall argument from pt_regs. +config HAVE_STACK_VALIDATION + bool + help + Architecture supports the 'objtool check' host tool command, which + performs compile-time stack metadata validation. + # # ABI hall of shame # diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8bfd1aca7a3d..855265621863 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -342,6 +342,18 @@ config FRAME_POINTER larger and slower, but it gives very useful debugging information in case of kernel bugs. (precise oopses/stacktraces/warnings) +config STACK_VALIDATION + bool "Compile-time stack metadata validation" + depends on HAVE_STACK_VALIDATION + default n + help + Add compile-time checks to validate stack metadata, including frame + pointers (if CONFIG_FRAME_POINTER is enabled). 
This helps ensure + that runtime stack traces are more reliable. + + For more information, see + tools/objtool/Documentation/stack-validation.txt. + config DEBUG_FORCE_WEAK_PER_CPU bool "Force weak per-cpu definitions" depends on DEBUG_KERNEL diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 2c47f9c305aa..130a452d42ee 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -241,10 +241,32 @@ cmd_record_mcount = \ fi; endif +ifdef CONFIG_STACK_VALIDATION + +__objtool_obj := $(objtree)/tools/objtool/objtool + +objtool_args = check +ifndef CONFIG_FRAME_POINTER +objtool_args += --no-fp +endif + +# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory +# 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file +# 'OBJECT_FILES_NON_STANDARD_foo.o := 'n': override directory skip for a file +cmd_objtool = $(if $(patsubst y%,, \ + $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ + $(__objtool_obj) $(objtool_args) "$(@)";) +objtool_obj = $(if $(patsubst y%,, \ + $(OBJECT_FILES_NON_STANDARD_$(basetarget).o)$(OBJECT_FILES_NON_STANDARD)n), \ + $(__objtool_obj)) + +endif # CONFIG_STACK_VALIDATION + define rule_cc_o_c $(call echo-cmd,checksrc) $(cmd_checksrc) \ $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \ $(cmd_modversions) \ + $(cmd_objtool) \ $(call echo-cmd,record_mcount) \ $(cmd_record_mcount) \ scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \ @@ -253,14 +275,23 @@ define rule_cc_o_c mv -f $(dot-target).tmp $(dot-target).cmd endef +define rule_as_o_S + $(call echo-cmd,as_o_S) $(cmd_as_o_S); \ + $(cmd_objtool) \ + scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,as_o_S)' > \ + $(dot-target).tmp; \ + rm -f $(depfile); \ + mv -f $(dot-target).tmp $(dot-target).cmd +endef + # Built-in and composite module parts -$(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE +$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_obj) FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) # Single-part modules are special since we need to mark them in $(MODVERDIR) -$(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE +$(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_obj) FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod) @@ -290,8 +321,8 @@ $(obj)/%.s: $(src)/%.S FORCE quiet_cmd_as_o_S = AS $(quiet_modtag) $@ cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< -$(obj)/%.o: $(src)/%.S FORCE - $(call if_changed_dep,as_o_S) +$(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE + $(call if_changed_rule,as_o_S) targets += $(real-objs-y) $(real-objs-m) $(lib-y) targets += $(extra-y) $(MAKECMDGOALS) $(always) -- cgit v1.3-14-g43fede From 757c989b9994f51b42d6be1bd33c7c12d16a3ac7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Feb 2016 18:43:32 +0000 Subject: cpu/hotplug: Make target state writeable Make it possible to write a target state to the per cpu state file, so we can switch between states. Signed-off-by: Thomas Gleixner Cc: linux-arch@vger.kernel.org Cc: Rik van Riel Cc: Rafael Wysocki Cc: "Srivatsa S. 
Bhat" Cc: Peter Zijlstra Cc: Arjan van de Ven Cc: Sebastian Siewior Cc: Rusty Russell Cc: Steven Rostedt Cc: Oleg Nesterov Cc: Tejun Heo Cc: Andrew Morton Cc: Paul McKenney Cc: Linus Torvalds Cc: Paul Turner Link: http://lkml.kernel.org/r/20160226182341.022814799@linutronix.de Signed-off-by: Thomas Gleixner --- kernel/cpu.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++------ lib/Kconfig.debug | 13 ++++++++++ 2 files changed, 78 insertions(+), 8 deletions(-) (limited to 'lib/Kconfig.debug') diff --git a/kernel/cpu.c b/kernel/cpu.c index 1979b8927b86..be9335da82f1 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -48,12 +48,14 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state); * @teardown: Teardown function of the step * @skip_onerr: Do not invoke the functions on error rollback * Will go away once the notifiers are gone + * @cant_stop: Bringup/teardown can't be stopped at this step */ struct cpuhp_step { const char *name; int (*startup)(unsigned int cpu); int (*teardown)(unsigned int cpu); bool skip_onerr; + bool cant_stop; }; static DEFINE_MUTEX(cpuhp_state_mutex); @@ -558,7 +560,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, if (num_online_cpus() == 1) return -EBUSY; - if (!cpu_online(cpu)) + if (!cpu_present(cpu)) return -EINVAL; cpu_hotplug_begin(); @@ -683,16 +685,25 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) cpu_hotplug_begin(); - if (cpu_online(cpu) || !cpu_present(cpu)) { + if (!cpu_present(cpu)) { ret = -EINVAL; goto out; } - /* Let it fail before we try to bring the cpu up */ - idle = idle_thread_get(cpu); - if (IS_ERR(idle)) { - ret = PTR_ERR(idle); + /* + * The caller of do_cpu_up might have raced with another + * caller. Ignore it for now. + */ + if (st->state >= target) goto out; + + if (st->state == CPUHP_OFFLINE) { + /* Let it fail before we try to bring the cpu up */ + idle = idle_thread_get(cpu); + if (IS_ERR(idle)) { + ret = PTR_ERR(idle); + goto out; + } } cpuhp_tasks_frozen = tasks_frozen; @@ -909,27 +920,32 @@ static struct cpuhp_step cpuhp_bp_states[] = { .name = "threads:create", .startup = smpboot_create_threads, .teardown = NULL, + .cant_stop = true, }, [CPUHP_NOTIFY_PREPARE] = { .name = "notify:prepare", .startup = notify_prepare, .teardown = notify_dead, .skip_onerr = true, + .cant_stop = true, }, [CPUHP_BRINGUP_CPU] = { .name = "cpu:bringup", .startup = bringup_cpu, .teardown = NULL, + .cant_stop = true, }, [CPUHP_TEARDOWN_CPU] = { .name = "cpu:teardown", .startup = NULL, .teardown = takedown_cpu, + .cant_stop = true, }, [CPUHP_NOTIFY_ONLINE] = { .name = "notify:online", .startup = notify_online, .teardown = notify_down_prepare, + .cant_stop = true, }, #endif [CPUHP_ONLINE] = { @@ -947,6 +963,7 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup = notify_starting, .teardown = notify_dying, .skip_onerr = true, + .cant_stop = true, }, #endif [CPUHP_ONLINE] = { @@ -979,6 +996,46 @@ static ssize_t show_cpuhp_state(struct device *dev, } static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL); +static ssize_t write_cpuhp_target(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id); + struct cpuhp_step *sp; + int target, ret; + + ret = kstrtoint(buf, 10, &target); + if (ret) + return ret; + +#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL + if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE) + return -EINVAL; +#else + if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE) + return -EINVAL; 
+#endif + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + mutex_lock(&cpuhp_state_mutex); + sp = cpuhp_get_step(target); + ret = !sp->name || sp->cant_stop ? -EINVAL : 0; + mutex_unlock(&cpuhp_state_mutex); + if (ret) + return ret; + + if (st->state < target) + ret = do_cpu_up(dev->id, target); + else + ret = do_cpu_down(dev->id, target); + + unlock_device_hotplug(); + return ret ? ret : count; +} + static ssize_t show_cpuhp_target(struct device *dev, struct device_attribute *attr, char *buf) { @@ -986,7 +1043,7 @@ static ssize_t show_cpuhp_target(struct device *dev, return sprintf(buf, "%d\n", st->target); } -static DEVICE_ATTR(target, 0444, show_cpuhp_target, NULL); +static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target); static struct attribute *cpuhp_cpu_attrs[] = { &dev_attr_state.attr, @@ -1007,7 +1064,7 @@ static ssize_t show_cpuhp_states(struct device *dev, int i; mutex_lock(&cpuhp_state_mutex); - for (i = 0; i <= CPUHP_ONLINE; i++) { + for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) { struct cpuhp_step *sp = cpuhp_get_step(i); if (sp->name) { diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8bfd1aca7a3d..f28f7fad452f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1442,6 +1442,19 @@ config DEBUG_BLOCK_EXT_DEVT Say N if you are unsure. +config CPU_HOTPLUG_STATE_CONTROL + bool "Enable CPU hotplug state control" + depends on DEBUG_KERNEL + depends on HOTPLUG_CPU + default n + help + Allows to write steps between "offline" and "online" to the CPUs + sysfs target file so states can be stepped granular. This is a debug + option for now as the hotplug machinery cannot be stopped and + restarted at arbitrary points yet. + + Say N if your are unsure. + config NOTIFIER_ERROR_INJECTION tristate "Notifier error injection" depends on DEBUG_KERNEL -- cgit v1.3-14-g43fede From 5c9a8750a6409c63a0f01d51a9024861022f6593 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 22 Mar 2016 14:27:30 -0700 Subject: kernel: add kcov code coverage kcov provides code coverage collection for coverage-guided fuzzing (randomized testing). Coverage-guided fuzzing is a testing technique that uses coverage feedback to determine new interesting inputs to a system. A notable user-space example is AFL (http://lcamtuf.coredump.cx/afl/). However, this technique is not widely used for kernel testing due to missing compiler and kernel support. kcov does not aim to collect as much coverage as possible. It aims to collect more or less stable coverage that is function of syscall inputs. To achieve this goal it does not collect coverage in soft/hard interrupts and instrumentation of some inherently non-deterministic or non-interesting parts of kernel is disbled (e.g. scheduler, locking). Currently there is a single coverage collection mode (tracing), but the API anticipates additional collection modes. Initially I also implemented a second mode which exposes coverage in a fixed-size hash table of counters (what Quentin used in his original patch). I've dropped the second mode for simplicity. This patch adds the necessary support on kernel side. The complimentary compiler support was added in gcc revision 231296. We've used this support to build syzkaller system call fuzzer, which has found 90 kernel bugs in just 2 months: https://github.com/google/syzkaller/wiki/Found-Bugs We've also found 30+ bugs in our internal systems with syzkaller. Another (yet unexplored) direction where kcov coverage would greatly help is more traditional "blob mutation". 
For example, mounting a random blob as a filesystem, or receiving a random blob over wire. Why not gcov. Typical fuzzing loop looks as follows: (1) reset coverage, (2) execute a bit of code, (3) collect coverage, repeat. A typical coverage can be just a dozen of basic blocks (e.g. an invalid input). In such context gcov becomes prohibitively expensive as reset/collect coverage steps depend on total number of basic blocks/edges in program (in case of kernel it is about 2M). Cost of kcov depends only on number of executed basic blocks/edges. On top of that, kernel requires per-thread coverage because there are always background threads and unrelated processes that also produce coverage. With inlined gcov instrumentation per-thread coverage is not possible. kcov exposes kernel PCs and control flow to user-space which is insecure. But debugfs should not be mapped as user accessible. Based on a patch by Quentin Casasnovas. [akpm@linux-foundation.org: make task_struct.kcov_mode have type `enum kcov_mode'] [akpm@linux-foundation.org: unbreak allmodconfig] [akpm@linux-foundation.org: follow x86 Makefile layout standards] Signed-off-by: Dmitry Vyukov Reviewed-by: Kees Cook Cc: syzkaller Cc: Vegard Nossum Cc: Catalin Marinas Cc: Tavis Ormandy Cc: Will Deacon Cc: Quentin Casasnovas Cc: Kostya Serebryany Cc: Eric Dumazet Cc: Alexander Potapenko Cc: Kees Cook Cc: Bjorn Helgaas Cc: Sasha Levin Cc: David Drysdale Cc: Ard Biesheuvel Cc: Andrey Ryabinin Cc: Kirill A. Shutemov Cc: Jiri Slaby Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/kcov.txt | 111 ++++++++++++++ Makefile | 11 +- arch/x86/Kconfig | 1 + arch/x86/boot/Makefile | 7 + arch/x86/boot/compressed/Makefile | 3 + arch/x86/entry/vdso/Makefile | 3 + arch/x86/kernel/Makefile | 6 + arch/x86/kernel/apic/Makefile | 4 + arch/x86/kernel/cpu/Makefile | 4 + arch/x86/lib/Makefile | 3 + arch/x86/mm/Makefile | 3 + arch/x86/realmode/rm/Makefile | 3 + drivers/firmware/efi/libstub/Makefile | 3 + include/linux/kcov.h | 29 ++++ include/linux/sched.h | 11 ++ include/uapi/linux/kcov.h | 10 ++ kernel/Makefile | 12 ++ kernel/exit.c | 2 + kernel/fork.c | 3 + kernel/kcov.c | 273 ++++++++++++++++++++++++++++++++++ kernel/locking/Makefile | 3 + kernel/rcu/Makefile | 4 + kernel/sched/Makefile | 4 + lib/Kconfig.debug | 21 +++ lib/Makefile | 12 ++ mm/Makefile | 15 ++ mm/kasan/Makefile | 1 + scripts/Makefile.lib | 6 + 28 files changed, 567 insertions(+), 1 deletion(-) create mode 100644 Documentation/kcov.txt create mode 100644 include/linux/kcov.h create mode 100644 include/uapi/linux/kcov.h create mode 100644 kernel/kcov.c (limited to 'lib/Kconfig.debug') diff --git a/Documentation/kcov.txt b/Documentation/kcov.txt new file mode 100644 index 000000000000..779ff4ab1c1d --- /dev/null +++ b/Documentation/kcov.txt @@ -0,0 +1,111 @@ +kcov: code coverage for fuzzing +=============================== + +kcov exposes kernel code coverage information in a form suitable for coverage- +guided fuzzing (randomized testing). Coverage data of a running kernel is +exported via the "kcov" debugfs file. Coverage collection is enabled on a task +basis, and thus it can capture precise coverage of a single system call. + +Note that kcov does not aim to collect as much coverage as possible. It aims +to collect more or less stable coverage that is function of syscall inputs. 
+To achieve this goal it does not collect coverage in soft/hard interrupts +and instrumentation of some inherently non-deterministic parts of kernel is +disbled (e.g. scheduler, locking). + +Usage: +====== + +Configure kernel with: + + CONFIG_KCOV=y + +CONFIG_KCOV requires gcc built on revision 231296 or later. +Profiling data will only become accessible once debugfs has been mounted: + + mount -t debugfs none /sys/kernel/debug + +The following program demonstrates kcov usage from within a test program: + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long) +#define KCOV_ENABLE _IO('c', 100) +#define KCOV_DISABLE _IO('c', 101) +#define COVER_SIZE (64<<10) + +int main(int argc, char **argv) +{ + int fd; + unsigned long *cover, n, i; + + /* A single fd descriptor allows coverage collection on a single + * thread. + */ + fd = open("/sys/kernel/debug/kcov", O_RDWR); + if (fd == -1) + perror("open"), exit(1); + /* Setup trace mode and trace size. */ + if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE)) + perror("ioctl"), exit(1); + /* Mmap buffer shared between kernel- and user-space. */ + cover = (unsigned long*)mmap(NULL, COVER_SIZE * sizeof(unsigned long), + PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if ((void*)cover == MAP_FAILED) + perror("mmap"), exit(1); + /* Enable coverage collection on the current thread. */ + if (ioctl(fd, KCOV_ENABLE, 0)) + perror("ioctl"), exit(1); + /* Reset coverage from the tail of the ioctl() call. */ + __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED); + /* That's the target syscal call. */ + read(-1, NULL, 0); + /* Read number of PCs collected. */ + n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED); + for (i = 0; i < n; i++) + printf("0x%lx\n", cover[i + 1]); + /* Disable coverage collection for the current thread. After this call + * coverage can be enabled for a different thread. + */ + if (ioctl(fd, KCOV_DISABLE, 0)) + perror("ioctl"), exit(1); + /* Free resources. */ + if (munmap(cover, COVER_SIZE * sizeof(unsigned long))) + perror("munmap"), exit(1); + if (close(fd)) + perror("close"), exit(1); + return 0; +} + +After piping through addr2line output of the program looks as follows: + +SyS_read +fs/read_write.c:562 +__fdget_pos +fs/file.c:774 +__fget_light +fs/file.c:746 +__fget_light +fs/file.c:750 +__fget_light +fs/file.c:760 +__fdget_pos +fs/file.c:784 +SyS_read +fs/read_write.c:562 + +If a program needs to collect coverage from several threads (independently), +it needs to open /sys/kernel/debug/kcov in each thread separately. + +The interface is fine-grained to allow efficient forking of test processes. +That is, a parent process opens /sys/kernel/debug/kcov, enables trace mode, +mmaps coverage buffer and then forks child processes in a loop. Child processes +only need to enable coverage (disable happens automatically on thread end). diff --git a/Makefile b/Makefile index e055b969c325..b98a4f70d1b5 100644 --- a/Makefile +++ b/Makefile @@ -365,6 +365,7 @@ LDFLAGS_MODULE = CFLAGS_KERNEL = AFLAGS_KERNEL = CFLAGS_GCOV = -fprofile-arcs -ftest-coverage +CFLAGS_KCOV = -fsanitize-coverage=trace-pc # Use USERINCLUDE when you must reference the UAPI directories only. 
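The CFLAGS_KCOV flag added above, -fsanitize-coverage=trace-pc, makes GCC emit a call to __sanitizer_cov_trace_pc() at the start of every basic block; the callback itself is implemented in kernel/kcov.c later in this patch. Roughly, and only as a hand-written approximation (abs_val() is an invented example; the real instrumentation happens inside the compiler, not in source):

void __sanitizer_cov_trace_pc(void);	/* provided by kernel/kcov.c */

/* What the author writes: */
int abs_val(int x)
{
	if (x < 0)
		return -x;
	return x;
}

/* Roughly how it behaves once instrumented: one callback per basic
 * block, each recording its return address into the per-task buffer
 * whenever collection is enabled for the current task. */
int abs_val_instrumented(int x)
{
	__sanitizer_cov_trace_pc();		/* entry block */
	if (x < 0) {
		__sanitizer_cov_trace_pc();	/* "x < 0" block */
		return -x;
	}
	__sanitizer_cov_trace_pc();		/* fall-through block */
	return x;
}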
@@ -411,7 +412,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS -export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KASAN CFLAGS_UBSAN +export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL @@ -673,6 +674,14 @@ endif endif KBUILD_CFLAGS += $(stackp-flag) +ifdef CONFIG_KCOV + ifeq ($(call cc-option, $(CFLAGS_KCOV)),) + $(warning Cannot use CONFIG_KCOV: \ + -fsanitize-coverage=trace-pc is not supported by compiler) + CFLAGS_KCOV = + endif +endif + ifeq ($(cc-name),clang) KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8b680a5cb25b..54478b7635de 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -28,6 +28,7 @@ config X86 select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_PMEM_API if X86_64 select ARCH_HAS_MMIO_FLUSH select ARCH_HAS_SG_CHAIN diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 0bf6749522d9..b1ef9e489084 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -12,6 +12,13 @@ KASAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +# Kernel does not boot with kcov instrumentation here. +# One of the problems observed was insertion of __sanitizer_cov_trace_pc() +# callback into middle of per-cpu data enabling code. Thus the callback observed +# inconsistent state and crashed. We are interested mostly in syscall coverage, +# so boot code is not interesting anyway. +KCOV_INSTRUMENT := n + # If you want to preset the SVGA mode, uncomment the next line and # set SVGA_MODE to whatever number you want. # Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode. diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 5e1d26e09407..6915ff2bd996 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -19,6 +19,9 @@ KASAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index f9fb859c98b9..6874da5f67fc 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -7,6 +7,9 @@ KASAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + VDSO64-$(CONFIG_X86_64) := y VDSOX32-$(CONFIG_X86_X32_ABI) := y VDSO32-$(CONFIG_X86_32) := y diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index d5fb0871aba3..adaae2c781c1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -25,6 +25,12 @@ OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o := y OBJECT_FILES_NON_STANDARD_test_nx.o := y +# If instrumentation of this dir is enabled, boot hangs during first second. 
+# Probably could be more selective here, but note that files related to irqs, +# boot, dumpstack/stacktrace, etc are either non-interesting or can lead to +# non-deterministic coverage. +KCOV_INSTRUMENT := n + CFLAGS_irq.o := -I$(src)/../include/asm/trace obj-y := process_$(BITS).o signal.o diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile index 8bb12ddc5db8..8e63ebdcbd0b 100644 --- a/arch/x86/kernel/apic/Makefile +++ b/arch/x86/kernel/apic/Makefile @@ -2,6 +2,10 @@ # Makefile for local APIC drivers and for the IO-APIC code # +# Leads to non-deterministic coverage that is not a function of syscall inputs. +# In particualr, smp_apic_timer_interrupt() is called in random places. +KCOV_INSTRUMENT := n + obj-$(CONFIG_X86_LOCAL_APIC) += apic.o apic_noop.o ipi.o vector.o obj-y += hw_nmi.o diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 0d373d7affc8..4a8697f7d4ef 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -8,6 +8,10 @@ CFLAGS_REMOVE_common.o = -pg CFLAGS_REMOVE_perf_event.o = -pg endif +# If these files are instrumented, boot hangs during the first second. +KCOV_INSTRUMENT_common.o := n +KCOV_INSTRUMENT_perf_event.o := n + # Make sure load_percpu_segment has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_common.o := $(nostackp) diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index a501fa25da41..72a576752a7e 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -2,6 +2,9 @@ # Makefile for x86 specific library files. # +# Produces uninteresting flaky coverage. +KCOV_INSTRUMENT_delay.o := n + inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt quiet_cmd_inat_tables = GEN $@ diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 67cf2e1e557b..f98913258c63 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,3 +1,6 @@ +# Kernel does not boot with instrumentation of tlb.c. +KCOV_INSTRUMENT_tlb.o := n + obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ pat.o pgtable.o physaddr.o gup.o setup_nx.o diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 053abe7b0ef7..b95964610ea7 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -9,6 +9,9 @@ KASAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. +KCOV_INSTRUMENT := n + always := realmode.bin realmode.relocs wakeup-objs := wakeup_asm.o wakemain.o video-mode.o diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index a15841eced4e..da99bbb74aeb 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -25,6 +25,9 @@ KASAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
+KCOV_INSTRUMENT := n + lib-y := efi-stub-helper.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 diff --git a/include/linux/kcov.h b/include/linux/kcov.h new file mode 100644 index 000000000000..2883ac98c280 --- /dev/null +++ b/include/linux/kcov.h @@ -0,0 +1,29 @@ +#ifndef _LINUX_KCOV_H +#define _LINUX_KCOV_H + +#include + +struct task_struct; + +#ifdef CONFIG_KCOV + +void kcov_task_init(struct task_struct *t); +void kcov_task_exit(struct task_struct *t); + +enum kcov_mode { + /* Coverage collection is not enabled yet. */ + KCOV_MODE_DISABLED = 0, + /* + * Tracing coverage collection mode. + * Covered PCs are collected in a per-task buffer. + */ + KCOV_MODE_TRACE = 1, +}; + +#else + +static inline void kcov_task_init(struct task_struct *t) {} +static inline void kcov_task_exit(struct task_struct *t) {} + +#endif /* CONFIG_KCOV */ +#endif /* _LINUX_KCOV_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 084ed9fba620..34495d2d2d7b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -51,6 +51,7 @@ struct sched_param { #include #include #include +#include #include #include #include @@ -1818,6 +1819,16 @@ struct task_struct { /* bitmask and counter of trace recursion */ unsigned long trace_recursion; #endif /* CONFIG_TRACING */ +#ifdef CONFIG_KCOV + /* Coverage collection mode enabled for this task (0 if disabled). */ + enum kcov_mode kcov_mode; + /* Size of the kcov_area. */ + unsigned kcov_size; + /* Buffer for coverage collection. */ + void *kcov_area; + /* kcov desciptor wired with this task or NULL. */ + struct kcov *kcov; +#endif #ifdef CONFIG_MEMCG struct mem_cgroup *memcg_in_oom; gfp_t memcg_oom_gfp_mask; diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h new file mode 100644 index 000000000000..574e22ec640d --- /dev/null +++ b/include/uapi/linux/kcov.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_KCOV_IOCTLS_H +#define _LINUX_KCOV_IOCTLS_H + +#include + +#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long) +#define KCOV_ENABLE _IO('c', 100) +#define KCOV_DISABLE _IO('c', 101) + +#endif /* _LINUX_KCOV_IOCTLS_H */ diff --git a/kernel/Makefile b/kernel/Makefile index baa55e50a315..f0c40bf49d9f 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -18,6 +18,17 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_irq_work.o = $(CC_FLAGS_FTRACE) endif +# Prevents flicker of uninteresting __do_softirq()/__local_bh_disable_ip() +# in coverage traces. +KCOV_INSTRUMENT_softirq.o := n +# These are called from save_stack_trace() on slub debug path, +# and produce insane amounts of uninteresting coverage. +KCOV_INSTRUMENT_module.o := n +KCOV_INSTRUMENT_extable.o := n +# Don't self-instrument. 
+KCOV_INSTRUMENT_kcov.o := n +KASAN_SANITIZE_kcov.o := n + # cond_syscall is currently not LTO compatible CFLAGS_sys_ni.o = $(DISABLE_LTO) @@ -68,6 +79,7 @@ obj-$(CONFIG_AUDITSYSCALL) += auditsc.o obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o audit_fsnotify.o obj-$(CONFIG_AUDIT_TREE) += audit_tree.o obj-$(CONFIG_GCOV_KERNEL) += gcov/ +obj-$(CONFIG_KCOV) += kcov.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += debug/ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o diff --git a/kernel/exit.c b/kernel/exit.c index 10e088237fed..953d1a1c0387 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -655,6 +656,7 @@ void do_exit(long code) TASKS_RCU(int tasks_rcu_i); profile_task_exit(tsk); + kcov_task_exit(tsk); WARN_ON(blk_needs_flush_plug(tsk)); diff --git a/kernel/fork.c b/kernel/fork.c index 5b8d1e7ceeea..d277e83ed3e0 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -75,6 +75,7 @@ #include #include #include +#include #include #include @@ -392,6 +393,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) account_kernel_stack(ti, 1); + kcov_task_init(tsk); + return tsk; free_ti: diff --git a/kernel/kcov.c b/kernel/kcov.c new file mode 100644 index 000000000000..3efbee0834a8 --- /dev/null +++ b/kernel/kcov.c @@ -0,0 +1,273 @@ +#define pr_fmt(fmt) "kcov: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * kcov descriptor (one per opened debugfs file). + * State transitions of the descriptor: + * - initial state after open() + * - then there must be a single ioctl(KCOV_INIT_TRACE) call + * - then, mmap() call (several calls are allowed but not useful) + * - then, repeated enable/disable for a task (only one task a time allowed) + */ +struct kcov { + /* + * Reference counter. We keep one for: + * - opened file descriptor + * - task with enabled coverage (we can't unwire it from another task) + */ + atomic_t refcount; + /* The lock protects mode, size, area and t. */ + spinlock_t lock; + enum kcov_mode mode; + /* Size of arena (in long's for KCOV_MODE_TRACE). */ + unsigned size; + /* Coverage buffer shared with user space. */ + void *area; + /* Task for which we collect coverage, or NULL. */ + struct task_struct *t; +}; + +/* + * Entry point from instrumented code. + * This is called once per basic-block/edge. + */ +void __sanitizer_cov_trace_pc(void) +{ + struct task_struct *t; + enum kcov_mode mode; + + t = current; + /* + * We are interested in code coverage as a function of a syscall inputs, + * so we ignore code executed in interrupts. + */ + if (!t || in_interrupt()) + return; + mode = READ_ONCE(t->kcov_mode); + if (mode == KCOV_MODE_TRACE) { + unsigned long *area; + unsigned long pos; + + /* + * There is some code that runs in interrupts but for which + * in_interrupt() returns false (e.g. preempt_schedule_irq()). + * READ_ONCE()/barrier() effectively provides load-acquire wrt + * interrupts, there are paired barrier()/WRITE_ONCE() in + * kcov_ioctl_locked(). + */ + barrier(); + area = t->kcov_area; + /* The first word is number of subsequent PCs. 
*/ + pos = READ_ONCE(area[0]) + 1; + if (likely(pos < t->kcov_size)) { + area[pos] = _RET_IP_; + WRITE_ONCE(area[0], pos); + } + } +} +EXPORT_SYMBOL(__sanitizer_cov_trace_pc); + +static void kcov_get(struct kcov *kcov) +{ + atomic_inc(&kcov->refcount); +} + +static void kcov_put(struct kcov *kcov) +{ + if (atomic_dec_and_test(&kcov->refcount)) { + vfree(kcov->area); + kfree(kcov); + } +} + +void kcov_task_init(struct task_struct *t) +{ + t->kcov_mode = KCOV_MODE_DISABLED; + t->kcov_size = 0; + t->kcov_area = NULL; + t->kcov = NULL; +} + +void kcov_task_exit(struct task_struct *t) +{ + struct kcov *kcov; + + kcov = t->kcov; + if (kcov == NULL) + return; + spin_lock(&kcov->lock); + if (WARN_ON(kcov->t != t)) { + spin_unlock(&kcov->lock); + return; + } + /* Just to not leave dangling references behind. */ + kcov_task_init(t); + kcov->t = NULL; + spin_unlock(&kcov->lock); + kcov_put(kcov); +} + +static int kcov_mmap(struct file *filep, struct vm_area_struct *vma) +{ + int res = 0; + void *area; + struct kcov *kcov = vma->vm_file->private_data; + unsigned long size, off; + struct page *page; + + area = vmalloc_user(vma->vm_end - vma->vm_start); + if (!area) + return -ENOMEM; + + spin_lock(&kcov->lock); + size = kcov->size * sizeof(unsigned long); + if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 || + vma->vm_end - vma->vm_start != size) { + res = -EINVAL; + goto exit; + } + if (!kcov->area) { + kcov->area = area; + vma->vm_flags |= VM_DONTEXPAND; + spin_unlock(&kcov->lock); + for (off = 0; off < size; off += PAGE_SIZE) { + page = vmalloc_to_page(kcov->area + off); + if (vm_insert_page(vma, vma->vm_start + off, page)) + WARN_ONCE(1, "vm_insert_page() failed"); + } + return 0; + } +exit: + spin_unlock(&kcov->lock); + vfree(area); + return res; +} + +static int kcov_open(struct inode *inode, struct file *filep) +{ + struct kcov *kcov; + + kcov = kzalloc(sizeof(*kcov), GFP_KERNEL); + if (!kcov) + return -ENOMEM; + atomic_set(&kcov->refcount, 1); + spin_lock_init(&kcov->lock); + filep->private_data = kcov; + return nonseekable_open(inode, filep); +} + +static int kcov_close(struct inode *inode, struct file *filep) +{ + kcov_put(filep->private_data); + return 0; +} + +static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd, + unsigned long arg) +{ + struct task_struct *t; + unsigned long size, unused; + + switch (cmd) { + case KCOV_INIT_TRACE: + /* + * Enable kcov in trace mode and setup buffer size. + * Must happen before anything else. + */ + if (kcov->mode != KCOV_MODE_DISABLED) + return -EBUSY; + /* + * Size must be at least 2 to hold current position and one PC. + * Later we allocate size * sizeof(unsigned long) memory, + * that must not overflow. + */ + size = arg; + if (size < 2 || size > INT_MAX / sizeof(unsigned long)) + return -EINVAL; + kcov->size = size; + kcov->mode = KCOV_MODE_TRACE; + return 0; + case KCOV_ENABLE: + /* + * Enable coverage for the current task. + * At this point user must have been enabled trace mode, + * and mmapped the file. Coverage collection is disabled only + * at task exit or voluntary by KCOV_DISABLE. After that it can + * be enabled for another task. + */ + unused = arg; + if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED || + kcov->area == NULL) + return -EINVAL; + if (kcov->t != NULL) + return -EBUSY; + t = current; + /* Cache in task struct for performance. */ + t->kcov_size = kcov->size; + t->kcov_area = kcov->area; + /* See comment in __sanitizer_cov_trace_pc(). 
*/ + barrier(); + WRITE_ONCE(t->kcov_mode, kcov->mode); + t->kcov = kcov; + kcov->t = t; + /* This is put either in kcov_task_exit() or in KCOV_DISABLE. */ + kcov_get(kcov); + return 0; + case KCOV_DISABLE: + /* Disable coverage for the current task. */ + unused = arg; + if (unused != 0 || current->kcov != kcov) + return -EINVAL; + t = current; + if (WARN_ON(kcov->t != t)) + return -EINVAL; + kcov_task_init(t); + kcov->t = NULL; + kcov_put(kcov); + return 0; + default: + return -ENOTTY; + } +} + +static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + struct kcov *kcov; + int res; + + kcov = filep->private_data; + spin_lock(&kcov->lock); + res = kcov_ioctl_locked(kcov, cmd, arg); + spin_unlock(&kcov->lock); + return res; +} + +static const struct file_operations kcov_fops = { + .open = kcov_open, + .unlocked_ioctl = kcov_ioctl, + .mmap = kcov_mmap, + .release = kcov_close, +}; + +static int __init kcov_init(void) +{ + if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) { + pr_err("failed to create kcov in debugfs\n"); + return -ENOMEM; + } + return 0; +} + +device_initcall(kcov_init); diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 8e96f6cc2a4a..31322a4275cd 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -1,3 +1,6 @@ +# Any varying coverage in these files is non-deterministic +# and is generally not a function of system call inputs. +KCOV_INSTRUMENT := n obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile index 61a16569ffbf..032b2c015beb 100644 --- a/kernel/rcu/Makefile +++ b/kernel/rcu/Makefile @@ -1,3 +1,7 @@ +# Any varying coverage in these files is non-deterministic +# and is generally not a function of system call inputs. +KCOV_INSTRUMENT := n + obj-y += update.o sync.o obj-$(CONFIG_SRCU) += srcu.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 302d6ebd64f7..414d9c16da42 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -2,6 +2,10 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE) endif +# These files are disabled because they produce non-interesting flaky coverage +# that is not a function of syscall inputs. E.g. involuntary context switches. +KCOV_INSTRUMENT := n + ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is # needed for x86 only. Why this used to be enabled for all architectures is beyond diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5a60f45cd9bb..532d4d52d1df 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -696,6 +696,27 @@ source "lib/Kconfig.kasan" endmenu # "Memory Debugging" +config ARCH_HAS_KCOV + bool + help + KCOV does not have any arch-specific code, but currently it is enabled + only for x86_64. KCOV requires testing on other archs, and most likely + disabling of instrumentation for some early boot code. + +config KCOV + bool "Code coverage for fuzzing" + depends on ARCH_HAS_KCOV + select DEBUG_FS + help + KCOV exposes kernel code coverage information in a form suitable + for coverage-guided fuzzing (randomized testing). + + If RANDOMIZE_BASE is enabled, PC values will not be stable across + different machines and across reboots. If you need stable PC values, + disable RANDOMIZE_BASE. + + For more details, see Documentation/kcov.txt. 
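One work-flow that Documentation/kcov.txt above describes only in prose is the fork-based one, where a parent sets up the buffer once and each child enables collection for itself. A minimal sketch (error handling trimmed; run_one_test() is an invented stand-in for the code under test):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/wait.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define COVER_SIZE	(64 << 10)

static void run_one_test(void)
{
	read(-1, NULL, 0);	/* stand-in for the syscalls under test */
}

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover;
	int i;

	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	for (i = 0; i < 4; i++) {
		if (fork() == 0) {
			/* Child: enable, reset, run; exiting disables it. */
			ioctl(fd, KCOV_ENABLE, 0);
			__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
			run_one_test();
			_exit(0);
		}
		/* Only one task may be attached at a time, so serialize. */
		wait(NULL);
		printf("child %d covered %lu PCs\n", i,
		       __atomic_load_n(&cover[0], __ATOMIC_RELAXED));
	}
	return 0;
}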
+ config DEBUG_SHIRQ bool "Debug shared IRQ handlers" depends on DEBUG_KERNEL diff --git a/lib/Makefile b/lib/Makefile index 4962d14c450f..a1de5b61ff40 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -7,6 +7,18 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) endif +# These files are disabled because they produce lots of non-interesting and/or +# flaky coverage that is not a function of syscall inputs. For example, +# rbtree can be global and individual rotations don't correlate with inputs. +KCOV_INSTRUMENT_string.o := n +KCOV_INSTRUMENT_rbtree.o := n +KCOV_INSTRUMENT_list_debug.o := n +KCOV_INSTRUMENT_debugobjects.o := n +KCOV_INSTRUMENT_dynamic_debug.o := n +# Kernel does not boot if we instrument this file as it uses custom calling +# convention (see CONFIG_ARCH_HWEIGHT_CFLAGS). +KCOV_INSTRUMENT_hweight.o := n + lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ idr.o int_sqrt.o extable.o \ diff --git a/mm/Makefile b/mm/Makefile index 6da300a1414b..f5e797cbd128 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -5,6 +5,21 @@ KASAN_SANITIZE_slab_common.o := n KASAN_SANITIZE_slub.o := n +# These files are disabled because they produce non-interesting and/or +# flaky coverage that is not a function of syscall inputs. E.g. slab is out of +# free pages, or a task is migrated between nodes. +KCOV_INSTRUMENT_slab_common.o := n +KCOV_INSTRUMENT_slob.o := n +KCOV_INSTRUMENT_slab.o := n +KCOV_INSTRUMENT_slub.o := n +KCOV_INSTRUMENT_page_alloc.o := n +KCOV_INSTRUMENT_debug-pagealloc.o := n +KCOV_INSTRUMENT_kmemleak.o := n +KCOV_INSTRUMENT_kmemcheck.o := n +KCOV_INSTRUMENT_memcontrol.o := n +KCOV_INSTRUMENT_mmzone.o := n +KCOV_INSTRUMENT_vmstat.o := n + mmu-y := nommu.o mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \ mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index a61460d9f5b0..131daadf40e4 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -1,5 +1,6 @@ KASAN_SANITIZE := n UBSAN_SANITIZE_kasan.o := n +KCOV_INSTRUMENT := n CFLAGS_REMOVE_kasan.o = -pg # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index ad50d5859ac4..ddf83d0181e7 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -136,6 +136,12 @@ _c_flags += $(if $(patsubst n%,, \ $(CFLAGS_UBSAN)) endif +ifeq ($(CONFIG_KCOV),y) +_c_flags += $(if $(patsubst n%,, \ + $(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)y), \ + $(CFLAGS_KCOV)) +endif + # If building the kernel in a separate objtree expand all occurrences # of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/'). -- cgit v1.3-14-g43fede From 6c31da3464b4d28825d1827ee41a3a217b2dcf0e Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 19 Mar 2016 17:54:10 +0100 Subject: parisc,metag: Implement CONFIG_DEBUG_STACK_USAGE option On parisc and metag the stack grows upwards, so for those we need to scan the stack downwards in order to calculate how much stack a process has used. Tested on a 64bit parisc kernel. 
Signed-off-by: Helge Deller --- include/linux/sched.h | 8 ++++++++ lib/Kconfig.debug | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'lib/Kconfig.debug') diff --git a/include/linux/sched.h b/include/linux/sched.h index 34495d2d2d7b..589c4780b077 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2870,10 +2870,18 @@ static inline unsigned long stack_not_used(struct task_struct *p) unsigned long *n = end_of_stack(p); do { /* Skip over canary */ +# ifdef CONFIG_STACK_GROWSUP + n--; +# else n++; +# endif } while (!*n); +# ifdef CONFIG_STACK_GROWSUP + return (unsigned long)end_of_stack(p) - (unsigned long)n; +# else return (unsigned long)n - (unsigned long)end_of_stack(p); +# endif } #endif extern void set_task_stack_end_magic(struct task_struct *tsk); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 532d4d52d1df..1e9a607534ca 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -558,7 +558,7 @@ config DEBUG_KMEMLEAK_DEFAULT_OFF config DEBUG_STACK_USAGE bool "Stack utilization instrumentation" - depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG + depends on DEBUG_KERNEL && !IA64 help Enables the display of the minimum amount of free stack which each task has ever had available in the sysrq-T and sysrq-P debug output. -- cgit v1.3-14-g43fede
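To make the scan direction in the last patch concrete, here is a small userspace model of what stack_not_used() computes (an illustrative sketch, not kernel code: the array, its size and the ten "dirtied" words are invented, and the kernel's skipping of the STACK_END_MAGIC canary is omitted):

#include <stdio.h>
#include <string.h>

#define STACK_WORDS 64

static unsigned long stack[STACK_WORDS];	/* all-zero == never written */

/* Stack grows down (most architectures): unused space is at the low end,
 * so count zero words walking up from it -- the existing n++ loop. */
static size_t not_used_growsdown(void)
{
	size_t i = 0;

	while (i < STACK_WORDS && stack[i] == 0)
		i++;
	return i * sizeof(stack[0]);
}

/* Stack grows up (parisc, metag): unused space is at the high end,
 * so count zero words walking down from it -- the new n-- loop. */
static size_t not_used_growsup(void)
{
	size_t i = STACK_WORDS;

	while (i > 0 && stack[i - 1] == 0)
		i--;
	return (STACK_WORDS - i) * sizeof(stack[0]);
}

int main(void)
{
	/* Pretend the task dirtied the ten words nearest its current frame. */
	memset(&stack[STACK_WORDS - 10], 0xff, 10 * sizeof(stack[0]));
	printf("grows-down: %zu bytes never used\n", not_used_growsdown());

	memset(stack, 0, sizeof(stack));
	memset(stack, 0xff, 10 * sizeof(stack[0]));
	printf("grows-up:   %zu bytes never used\n", not_used_growsup());
	return 0;
}

Both runs report the same amount of unused stack (432 bytes with 8-byte longs); only the end the scan starts from differs, which is exactly what the new CONFIG_STACK_GROWSUP branches select.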