From 4840ce2267f9d887f333d88a037c82c566f84081 Mon Sep 17 00:00:00 2001
From: Xiongwei Song
Date: Tue, 29 Jun 2021 21:59:16 +0800
Subject: locking/lockdep: Fix meaningless /proc/lockdep output of lock classes on !CONFIG_PROVE_LOCKING

When CONFIG_LOCK_STAT=y is enabled, CONFIG_LOCKDEP=y is forcibly
enabled as well, but CONFIG_PROVE_LOCKING can remain disabled. In that
configuration /proc/lockdep can still be read, and it currently prints
the usage characters of each lock class -- but those usages are
meaningless. See the output below:

/ # cat /proc/lockdep
all lock classes:
ffffffff9af63350 ....: cgroup_mutex
ffffffff9af54eb8 ....: (console_sem).lock
ffffffff9af54e60 ....: console_lock
ffffffff9ae74c38 ....: console_owner_lock
ffffffff9ae74c80 ....: console_owner
ffffffff9ae66e60 ....: cpu_hotplug_lock

Each lock shows only a single, never-updated usage context. This is
because the usage bits are only updated in mark_lock(), which is
compiled in only with CONFIG_PROVE_LOCKING=y -- not the case in this
configuration.

Fix this by reading and printing the usage characters only when
CONFIG_PROVE_LOCKING is enabled. Likewise, the locks_after list of a
lock_class is always empty when !CONFIG_PROVE_LOCKING, so guard its
printing the same way.

With this patch and !CONFIG_PROVE_LOCKING, we get:

/ # cat /proc/lockdep
all lock classes:
ffffffff85163290: cgroup_mutex
ffffffff85154dd8: (console_sem).lock
ffffffff85154d80: console_lock
ffffffff85074b58: console_owner_lock
ffffffff85074ba0: console_owner
ffffffff85066d60: cpu_hotplug_lock

... i.e. one class key and the corresponding class name per line.

Signed-off-by: Xiongwei Song
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Acked-by: Waiman Long
Link: https://lore.kernel.org/r/20210629135916.308210-1-sxwjean@me.com
---
 kernel/locking/lockdep_proc.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 806978314496..b8d9a050c337 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -70,26 +70,28 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
 #endif
-#ifdef CONFIG_PROVE_LOCKING
-	seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
-	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
-#endif
+	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+		seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
+		seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 
-	get_usage_chars(class, usage);
-	seq_printf(m, " %s", usage);
+		get_usage_chars(class, usage);
+		seq_printf(m, " %s", usage);
+	}
 
 	seq_printf(m, ": ");
 	print_name(m, class);
 	seq_puts(m, "\n");
 
-	list_for_each_entry(entry, &class->locks_after, entry) {
-		if (entry->distance == 1) {
-			seq_printf(m, " -> [%p] ", entry->class->key);
-			print_name(m, entry->class);
-			seq_puts(m, "\n");
+	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+		list_for_each_entry(entry, &class->locks_after, entry) {
+			if (entry->distance == 1) {
+				seq_printf(m, " -> [%p] ", entry->class->key);
+				print_name(m, entry->class);
+				seq_puts(m, "\n");
+			}
 		}
+		seq_puts(m, "\n");
 	}
-	seq_puts(m, "\n");
 
 	return 0;
 }
--
cgit v1.2.3-59-g8ed1b
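Aside: the patch above replaces preprocessor #ifdef guards with C-level
if (IS_ENABLED(...)) checks. The benefit of that form is that the
guarded code is always parsed and type-checked in every configuration,
and the optimizer then drops it as dead code when the option is off. A
minimal user-space sketch of the idea -- IS_ENABLED_DEMO is a
simplified stand-in for the kernel's real IS_ENABLED() machinery, and
print_deps() is a hypothetical helper, not kernel API:

#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(): expands to 1
 * when the option is configured in, 0 otherwise. */
#ifdef CONFIG_DEMO_PROVE_LOCKING
#define IS_ENABLED_DEMO 1
#else
#define IS_ENABLED_DEMO 0
#endif

static void print_deps(void)
{
	printf(" FD:    1 BD:    1");	/* hypothetical dependency counts */
}

int main(void)
{
	/* Unlike an #ifdef block, this branch is always compiled and
	 * type-checked; when IS_ENABLED_DEMO is 0 the compiler removes
	 * it as dead code, so there is no runtime cost either way. */
	if (IS_ENABLED_DEMO)
		print_deps();

	printf(": cgroup_mutex\n");
	return 0;
}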
From 9e667624c291753b8a5128f620f493d0b5226063 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 28 Jun 2021 13:24:10 +0200
Subject: jump_label: Fix jump_label_text_reserved() vs __init

It turns out that jump_label_text_reserved() was reporting __init text
as being reserved past the time when the __init text was freed and
re-used.

For a long time, this resulted in, at worst, not being able to kprobe
text that happened to land at the re-used address. However a recent
commit e7bf1ba97afd ("jump_label, x86: Emit short JMP") made it a
fatal mistake because it now needs to read the instruction in order to
determine the conflict -- an instruction that's no longer there.

Fixes: 4c3ef6d79328 ("jump label: Add jump_label_text_reserved() to reserve jump points")
Reported-by: kernel test robot
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Reviewed-by: Masami Hiramatsu
Link: https://lore.kernel.org/r/20210628113045.045141693@infradead.org
---
 kernel/jump_label.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index bdb0681bece8..b156e152d6b4 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -316,14 +316,16 @@ static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 }
 
 static int __jump_label_text_reserved(struct jump_entry *iter_start,
-		struct jump_entry *iter_stop, void *start, void *end)
+		struct jump_entry *iter_stop, void *start, void *end, bool init)
 {
 	struct jump_entry *iter;
 
 	iter = iter_start;
 	while (iter < iter_stop) {
-		if (addr_conflict(iter, start, end))
-			return 1;
+		if (init || !jump_entry_is_init(iter)) {
+			if (addr_conflict(iter, start, end))
+				return 1;
+		}
 		iter++;
 	}
 
@@ -562,7 +564,7 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
 
 	ret = __jump_label_text_reserved(mod->jump_entries,
 				mod->jump_entries + mod->num_jump_entries,
-				start, end);
+				start, end, mod->state == MODULE_STATE_COMING);
 
 	module_put(mod);
 
@@ -788,8 +790,9 @@ early_initcall(jump_label_init_module);
  */
 int jump_label_text_reserved(void *start, void *end)
 {
+	bool init = system_state < SYSTEM_RUNNING;
 	int ret = __jump_label_text_reserved(__start___jump_table,
-			__stop___jump_table, start, end);
+			__stop___jump_table, start, end, init);
 
 	if (ret)
 		return ret;
--
cgit v1.2.3-59-g8ed1b

From 2bee6d16e4379326b1eea454e68c98b17456769e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 28 Jun 2021 13:24:11 +0200
Subject: static_call: Fix static_call_text_reserved() vs __init

It turns out that static_call_text_reserved() was reporting __init
text as being reserved past the time when the __init text was freed
and re-used. This is mostly harmless and will at worst result in
refusing a kprobe.
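The shape of the fix is the same as in the jump_label patch above: walk
the table of sites, but skip entries that live in __init text unless
the caller knows init text is still alive. A minimal self-contained
sketch of that walk -- struct site and its fields are hypothetical
stand-ins for the real jump_entry/static_call_site and their accessors:

#include <stdbool.h>

struct site {
	unsigned long addr;	/* address of the patch site */
	bool init;		/* site lives in freeable __init text */
};

static int text_reserved(struct site *start, struct site *stop,
			 unsigned long lo, unsigned long hi, bool init)
{
	struct site *iter;

	for (iter = start; iter < stop; iter++) {
		/* Once __init text has been freed, entries pointing
		 * into it are stale; only honour them while init text
		 * is still live (early boot, or a module that is still
		 * MODULE_STATE_COMING). */
		if (!init && iter->init)
			continue;
		if (iter->addr >= lo && iter->addr <= hi)
			return 1;
	}
	return 0;
}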
Fixes: 6333e8f73b83 ("static_call: Avoid kprobes on inline static_call()s")
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Ingo Molnar
Reviewed-by: Masami Hiramatsu
Link: https://lore.kernel.org/r/20210628113045.106211657@infradead.org
---
 kernel/static_call.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/kernel/static_call.c b/kernel/static_call.c
index 723fcc9d20db..43ba0b1e0edb 100644
--- a/kernel/static_call.c
+++ b/kernel/static_call.c
@@ -292,13 +292,15 @@ static int addr_conflict(struct static_call_site *site, void *start, void *end)
 
 static int __static_call_text_reserved(struct static_call_site *iter_start,
 				       struct static_call_site *iter_stop,
-				       void *start, void *end)
+				       void *start, void *end, bool init)
 {
 	struct static_call_site *iter = iter_start;
 
 	while (iter < iter_stop) {
-		if (addr_conflict(iter, start, end))
-			return 1;
+		if (init || !static_call_is_init(iter)) {
+			if (addr_conflict(iter, start, end))
+				return 1;
+		}
 		iter++;
 	}
 
@@ -324,7 +326,7 @@ static int __static_call_mod_text_reserved(void *start, void *end)
 
 	ret = __static_call_text_reserved(mod->static_call_sites,
 			mod->static_call_sites + mod->num_static_call_sites,
-			start, end);
+			start, end, mod->state == MODULE_STATE_COMING);
 
 	module_put(mod);
 
@@ -459,8 +461,9 @@ static inline int __static_call_mod_text_reserved(void *start, void *end)
 
 int static_call_text_reserved(void *start, void *end)
 {
+	bool init = system_state < SYSTEM_RUNNING;
 	int ret = __static_call_text_reserved(__start_static_call_sites,
-			__stop_static_call_sites, start, end);
+			__stop_static_call_sites, start, end, init);
 
 	if (ret)
 		return ret;
--
cgit v1.2.3-59-g8ed1b

From fa68bd09fc62240a383c0c601d3349c47db10c34 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 28 Jun 2021 13:24:12 +0200
Subject: kprobe/static_call: Restore missing static_call_text_reserved()

Restore two hunks from commit:

  6333e8f73b83 ("static_call: Avoid kprobes on inline static_call()s")

that went walkabout in a Git merge commit.

Fixes: 76d4acf22b48 ("Merge tag 'perf-kprobes-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip")
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Masami Hiramatsu
Link: https://lore.kernel.org/r/20210628113045.167127609@infradead.org
Signed-off-by: Ingo Molnar
---
 kernel/kprobes.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e41385afe79d..069388d26e3c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -35,6 +35,7 @@
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/jump_label.h>
+#include <linux/static_call.h>
 #include <linux/perf_event.h>
 
 #include <asm/sections.h>
@@ -1551,6 +1552,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    within_kprobe_blacklist((unsigned long) p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr) ||
+	    static_call_text_reserved(p->addr, p->addr) ||
 	    find_bug((unsigned long)p->addr)) {
 		ret = -EINVAL;
 		goto out;
--
cgit v1.2.3-59-g8ed1b

From 7e1088760cfe0bb1fdb1f0bd155bfd52f080683a Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Wed, 7 Jul 2021 09:30:32 +0100
Subject: locking/atomic: sparc: Fix arch_cmpxchg64_local()

Anatoly reports that since commit:

  ff5b4f1ed580c59d ("locking/atomic: sparc: move to ARCH_ATOMIC")

... it's possible to reliably trigger an oops by running:

  stress-ng -v --mmap 1 -t 30s

... which results in a NULL pointer dereference in
__split_huge_pmd_locked().

The underlying problem is that commit ff5b4f1ed580c59d left
arch_cmpxchg64_local() defined in terms of cmpxchg_local() rather than
arch_cmpxchg_local().
In <asm/cmpxchg_64.h> we wrap these with macros which use
identically-named variables. When cmpxchg_local() nests inside
cmpxchg64_local(), this causes it to use an uninitialized variable as
the pointer, which can be NULL.

This can also be seen in pmdp_establish(), where the compiler can
generate the pointer with a `clr` instruction:

0000000000000360 <pmdp_establish>:
 360:	9d e3 bf 50 	save  %sp, -176, %sp
 364:	fa 5e 80 00 	ldx  [ %i2 ], %i5
 368:	82 10 00 1b 	mov  %i3, %g1
 36c:	84 10 20 00 	clr  %g2
 370:	c3 f0 90 1d 	casx  [ %g2 ], %i5, %g1
 374:	80 a7 40 01 	cmp  %i5, %g1
 378:	32 6f ff fc 	bne,a   %xcc, 368
 37c:	fa 5e 80 00 	ldx  [ %i2 ], %i5
 380:	d0 5e 20 40 	ldx  [ %i0 + 0x40 ], %o0
 384:	96 10 00 1b 	mov  %i3, %o3
 388:	94 10 00 1d 	mov  %i5, %o2
 38c:	92 10 00 19 	mov  %i1, %o1
 390:	7f ff ff 84 	call  1a0 <__set_pmd_acct>
 394:	b0 10 00 1d 	mov  %i5, %i0
 398:	81 cf e0 08 	return  %i7 + 8
 39c:	01 00 00 00 	nop

This patch fixes the problem by defining arch_cmpxchg64_local() in
terms of arch_cmpxchg_local(), avoiding the potential shadowing and
resulting in a working cmpxchg64_local() and variants, e.g.:

0000000000000360 <pmdp_establish>:
 360:	9d e3 bf 50 	save  %sp, -176, %sp
 364:	fa 5e 80 00 	ldx  [ %i2 ], %i5
 368:	82 10 00 1b 	mov  %i3, %g1
 36c:	c3 f6 90 1d 	casx  [ %i2 ], %i5, %g1
 370:	80 a7 40 01 	cmp  %i5, %g1
 374:	32 6f ff fd 	bne,a   %xcc, 368
 378:	fa 5e 80 00 	ldx  [ %i2 ], %i5
 37c:	d0 5e 20 40 	ldx  [ %i0 + 0x40 ], %o0
 380:	96 10 00 1b 	mov  %i3, %o3
 384:	94 10 00 1d 	mov  %i5, %o2
 388:	92 10 00 19 	mov  %i1, %o1
 38c:	7f ff ff 85 	call  1a0 <__set_pmd_acct>
 390:	b0 10 00 1d 	mov  %i5, %i0
 394:	81 cf e0 08 	return  %i7 + 8
 398:	01 00 00 00 	nop
 39c:	01 00 00 00 	nop

Fixes: ff5b4f1ed580c59d ("locking/atomic: sparc: move to ARCH_ATOMIC")
Reported-by: Anatoly Pugachev
Signed-off-by: Mark Rutland
Signed-off-by: Ingo Molnar
Tested-by: Anatoly Pugachev
Link: https://lore.kernel.org/r/20210707083032.567-1-mark.rutland@arm.com
---
 arch/sparc/include/asm/cmpxchg_64.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 8c39a9981187..12d00a42c0a3 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -201,7 +201,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 #define arch_cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
+	arch_cmpxchg_local((ptr), (o), (n));				\
 })
 #define arch_cmpxchg64(ptr, o, n)	arch_cmpxchg64_local((ptr), (o), (n))
--
cgit v1.2.3-59-g8ed1b
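The macro-shadowing hazard fixed above is easy to reproduce outside the
kernel. Below is a standalone sketch using GNU C statement expressions,
as the cmpxchg wrappers do; LOAD() and LOAD2() are hypothetical macros,
not the kernel's:

/* Build with: gcc -O2 -Wuninitialized shadow.c */
#include <stdio.h>

#define LOAD(p)  ({ int *__ptr = (p); *__ptr; })	/* "inner" wrapper */
#define LOAD2(p) ({ int *__ptr = (p); LOAD(__ptr); })	/* nests the inner one */

int main(void)
{
	int x = 42;

	/*
	 * LOAD2(&x) expands so that the inner macro declares:
	 *
	 *	int *__ptr = (__ptr);
	 *
	 * By C scoping rules the initializer already refers to the NEW
	 * __ptr, not the outer one, so the load goes through an
	 * indeterminate pointer -- which the compiler is free to
	 * materialize as 0, exactly like the `clr %g2` in the broken
	 * pmdp_establish() listing above.
	 */
	printf("%d\n", LOAD2(&x));
	return 0;
}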