From cae74ba8c295bc41bda749ef27a8f2b3ee957a41 Mon Sep 17 00:00:00 2001
From: Sven Schnelle <svens@linux.ibm.com>
Date: Fri, 26 Apr 2024 12:02:15 +0200
Subject: s390/ftrace: Use unwinder instead of __builtin_return_address()

Using __builtin_return_address(n) might return undefined values when n
refers to a frame outside of the current stack. This was noticed when
__builtin_return_address() was called in ftrace on top level functions
like the interrupt handlers.

As this behaviour cannot be fixed, use the s390 stack unwinder and
remove the ftrace compilation flags for unwind_bc.c and stacktrace.c to
prevent the unwinding functions from polluting function traces.

Another advantage is that this also works with clang.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 arch/s390/kernel/stacktrace.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'arch/s390/kernel/stacktrace.c')

diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 94f440e38303..7c294da45bf5 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -101,3 +101,22 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 	}
 	pagefault_enable();
 }
+
+unsigned long return_address(unsigned int n)
+{
+	struct unwind_state state;
+	unsigned long addr;
+
+	/* Increment to skip current stack entry */
+	n++;
+
+	unwind_for_each_frame(&state, NULL, NULL, 0) {
+		addr = unwind_get_return_address(&state);
+		if (!addr)
+			break;
+		if (!n--)
+			return addr;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(return_address);
--
cgit v1.2.3-59-g8ed1b
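This view is limited to arch/s390/kernel/stacktrace.c, so the rest of the
commit is not shown. For context: the generic fallback in
include/linux/ftrace.h defines ftrace_return_address(n) as
__builtin_return_address(n) unless an architecture overrides it. A minimal
sketch of how the new helper would plug into that hook, assuming the hidden
part of the commit does the wiring in arch/s390/include/asm/ftrace.h:

/* Sketch, not the commit's verbatim diff: override the generic
 * ftrace_return_address() fallback with the unwinder-based helper. */
unsigned long return_address(unsigned int n);
#define ftrace_return_address(n) return_address(n)

With such an override in place, ftrace's CALLER_ADDR0..CALLER_ADDRn values
resolve through the unwinder instead of __builtin_return_address().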
From ebd912ff9919a10609511383d94942362234c077 Mon Sep 17 00:00:00 2001
From: Heiko Carstens <hca@linux.ibm.com>
Date: Mon, 29 Apr 2024 14:28:44 +0200
Subject: s390/stacktrace: Merge perf_callchain_user() and arch_stack_walk_user()

The two functions perf_callchain_user() and arch_stack_walk_user() are
nearly identical. Reduce code duplication and add a common helper which
can be called by both functions.

Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 arch/s390/include/asm/stacktrace.h |  7 +++++++
 arch/s390/kernel/perf_event.c      | 34 +---------------------------------
 arch/s390/kernel/stacktrace.c      | 33 ++++++++++++++++++++++++++++-----
 3 files changed, 36 insertions(+), 38 deletions(-)

(limited to 'arch/s390/kernel/stacktrace.c')

diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 433fde85b14e..4aefbe32265d 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_S390_STACKTRACE_H
 #define _ASM_S390_STACKTRACE_H
 
+#include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 
@@ -12,6 +13,12 @@ struct stack_frame_user {
 	unsigned long empty2[4];
 };
 
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf);
+
 enum stack_type {
 	STACK_TYPE_UNKNOWN,
 	STACK_TYPE_TASK,
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index dfa77da2fd2e..5fff629b1a89 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	struct stack_frame_user __user *sf;
-	unsigned long ip, sp;
-	bool first = true;
-
-	if (is_compat_task())
-		return;
-	perf_callchain_store(entry, instruction_pointer(regs));
-	sf = (void __user *)user_stack_pointer(regs);
-	pagefault_disable();
-	while (entry->nr < entry->max_stack) {
-		if (__get_user(sp, &sf->back_chain))
-			break;
-		if (__get_user(ip, &sf->gprs[8]))
-			break;
-		if (ip & 0x1) {
-			/*
-			 * If the instruction address is invalid, and this
-			 * is the first stack frame, assume r14 has not
-			 * been written to the stack yet. Otherwise exit.
-			 */
-			if (first && !(regs->gprs[14] & 0x1))
-				ip = regs->gprs[14];
-			else
-				break;
-		}
-		perf_callchain_store(entry, ip);
-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
-		first = false;
-	}
-	pagefault_enable();
+	arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
 }
 
 /* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 7c294da45bf5..e580d4cd2729 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -5,6 +5,7 @@
  *  Copyright IBM Corp. 2006
  */
 
+#include <linux/perf_event.h>
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/compat.h>
@@ -62,8 +63,23 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 	return 0;
 }
 
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
-			  const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+			    struct perf_callchain_entry_ctx *entry, bool perf,
+			    unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+	if (perf) {
+		if (perf_callchain_store(entry, ip))
+			return false;
+		return true;
+	}
+#endif
+	return consume_entry(cookie, ip);
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf)
 {
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
@@ -71,7 +87,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 
 	if (is_compat_task())
 		return;
-	if (!consume_entry(cookie, instruction_pointer(regs)))
+	ip = instruction_pointer(regs);
+	if (!store_ip(consume_entry, cookie, entry, perf, ip))
 		return;
 	sf = (void __user *)user_stack_pointer(regs);
 	pagefault_disable();
@@ -91,8 +108,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 			else
 				break;
 		}
-		if (!consume_entry(cookie, ip))
-			break;
+		if (!store_ip(consume_entry, cookie, entry, perf, ip))
+			return;
 		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
 		if (!sp || sp & 0x7)
 			break;
@@ -102,6 +119,12 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 	pagefault_enable();
 }
 
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
+{
+	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
 unsigned long return_address(unsigned int n)
 {
 	struct unwind_state state;
--
cgit v1.2.3-59-g8ed1b
From 87eceb17a987802aeee718be4decd19b56fc8e33 Mon Sep 17 00:00:00 2001
From: Heiko Carstens <hca@linux.ibm.com>
Date: Mon, 29 Apr 2024 14:28:45 +0200
Subject: s390/stacktrace: Skip first user stack frame

When walking user stack frames the first stack frame (the one the stack
pointer points to) should be skipped: the return address of the current
function is saved in the previous stack frame, not in the current stack
frame, which is allocated for functions that are yet to be called.

Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 arch/s390/kernel/stacktrace.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/s390/kernel/stacktrace.c')

diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index e580d4cd2729..1c9e3b7739a2 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -95,6 +95,10 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
 	while (1) {
 		if (__get_user(sp, &sf->back_chain))
 			break;
+		/* Sanity check: ABI requires SP to be 8 byte aligned. */
+		if (!sp || sp & 0x7)
+			break;
+		sf = (void __user *)sp;
 		if (__get_user(ip, &sf->gprs[8]))
 			break;
 		if (ip & 0x1) {
@@ -110,10 +114,6 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
 		}
 		if (!store_ip(consume_entry, cookie, entry, perf, ip))
 			return;
-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
 		first = false;
 	}
 	pagefault_enable();
--
cgit v1.2.3-59-g8ed1b
From cd58109283944ea8bdcd0a8211a86cbd2450716a Mon Sep 17 00:00:00 2001
From: Heiko Carstens <hca@linux.ibm.com>
Date: Mon, 29 Apr 2024 14:28:46 +0200
Subject: s390/stacktrace: Improve detection of invalid instruction pointers

Add basic checks to identify invalid instruction pointers when walking
stack frames. Instruction pointers must

- have even addresses,
- be larger than mmap_min_addr, and
- be lower than the asce_limit of the process.

Alternatively it would also be possible to walk page tables similar to
fast GUP and verify that the mapping of the corresponding page is
executable, however that seems to be overkill.

Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 arch/s390/kernel/stacktrace.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

(limited to 'arch/s390/kernel/stacktrace.c')

diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 1c9e3b7739a2..b4485b0c7f06 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -77,6 +77,21 @@ static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
 	return consume_entry(cookie, ip);
 }
 
+static inline bool ip_invalid(unsigned long ip)
+{
+	/*
+	 * Perform some basic checks if an instruction address taken
+	 * from unreliable source is invalid.
+	 */
+	if (ip & 1)
+		return true;
+	if (ip < mmap_min_addr)
+		return true;
+	if (ip >= current->mm->context.asce_limit)
+		return true;
+	return false;
+}
+
 void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
 				 struct perf_callchain_entry_ctx *entry,
 				 const struct pt_regs *regs, bool perf)
@@ -87,6 +102,8 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
 
 	if (is_compat_task())
 		return;
+	if (!current->mm)
+		return;
 	ip = instruction_pointer(regs);
 	if (!store_ip(consume_entry, cookie, entry, perf, ip))
 		return;
@@ -101,15 +118,16 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
 		sf = (void __user *)sp;
 		if (__get_user(ip, &sf->gprs[8]))
 			break;
-		if (ip & 0x1) {
+		if (ip_invalid(ip)) {
 			/*
 			 * If the instruction address is invalid, and this
 			 * is the first stack frame, assume r14 has not
 			 * been written to the stack yet. Otherwise exit.
 			 */
-			if (first && !(regs->gprs[14] & 0x1))
-				ip = regs->gprs[14];
-			else
+			if (!first)
+				break;
+			ip = regs->gprs[14];
+			if (ip_invalid(ip))
 				break;
 		}
 		if (!store_ip(consume_entry, cookie, entry, perf, ip))
--
cgit v1.2.3-59-g8ed1b
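At this point the walk reads the back chain first, switches to the previous
frame, validates the saved r14, and only then stores the address. The
following standalone userspace model shows the loop shape; the frame layout
follows struct stack_frame_user from the diffs, while the constants, the
mock frames, and the plain memory reads (instead of __get_user() and the
mmap_min_addr/asce_limit checks) are illustrative only.

/* Standalone model of the walk after the three fixes above; compile with
 * any C compiler and run. All addresses and constants are mocked. */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_MIN_ADDR	0x1000UL	/* stands in for mmap_min_addr */
#define MOCK_ASCE_LIMIT	(1UL << 53)	/* stands in for asce_limit */

/* Frame layout as in struct stack_frame_user; gprs[8] holds the saved r14. */
struct stack_frame_user {
	unsigned long back_chain;
	unsigned long empty1[5];
	unsigned long gprs[10];
	unsigned long empty2[4];
};

static bool ip_invalid(unsigned long ip)
{
	if (ip & 1)		/* s390 instructions have even addresses */
		return true;
	return ip < MOCK_MIN_ADDR || ip >= MOCK_ASCE_LIMIT;
}

/* Mirrors the loop in arch_stack_walk_user_common(): chain to the
 * previous frame *before* reading a return address, so the first,
 * not-yet-filled frame is skipped. */
static void walk(unsigned long sp, unsigned long ip, unsigned long r14)
{
	struct stack_frame_user *sf = (struct stack_frame_user *)sp;
	bool first = true;

	printf("0x%lx\n", ip);	/* current IP, taken from the registers */
	for (;;) {
		sp = sf->back_chain;
		if (!sp || sp & 0x7)	/* ABI: 8 byte aligned, 0 terminates */
			break;
		sf = (struct stack_frame_user *)sp;
		ip = sf->gprs[8];
		if (ip_invalid(ip)) {
			/* First frame: r14 may not be saved yet. */
			if (!first)
				break;
			ip = r14;
			if (ip_invalid(ip))
				break;
		}
		printf("0x%lx\n", ip);
		first = false;
	}
}

int main(void)
{
	/* Two mock frames; the caller's saved-r14 slot is still zero, which
	 * is invalid and forces the fallback to the r14 register value. */
	static struct stack_frame_user caller, callee;

	callee.back_chain = (unsigned long)&caller;
	walk((unsigned long)&callee, 0x400100UL, 0x400058UL);
	return 0;
}

The same loop shape covers the perf case: store_ip() simply routes each
address to perf_callchain_store() instead of consume_entry().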
From 62b672c4ba90e726cc39b5c3d6dffd1ca817e143 Mon Sep 17 00:00:00 2001
From: Heiko Carstens <hca@linux.ibm.com>
Date: Mon, 29 Apr 2024 14:28:48 +0200
Subject: s390/stacktrace: Detect vdso stack frames

Clear the backchain of the extra stack frame added by the vdso user
wrapper code. This allows the user stack walker to detect and skip the
non-standard stack frame. Without this an incorrect instruction pointer
would be added to stack traces, and stack frame walking would continue
with a more or less random back chain.

Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
---
 arch/s390/include/asm/processor.h           |  1 +
 arch/s390/kernel/asm-offsets.c              |  1 +
 arch/s390/kernel/stacktrace.c               | 28 ++++++++++++++++++++++++----
 arch/s390/kernel/vdso.c                     | 13 +++++++++----
 arch/s390/kernel/vdso64/vdso_user_wrapper.S |  1 +
 5 files changed, 36 insertions(+), 8 deletions(-)

(limited to 'arch/s390/kernel/stacktrace.c')

diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index db9982f0e8cd..bbbdc5abe2b2 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -98,6 +98,7 @@ void cpu_detect_mhz_feature(void);
 extern const struct seq_operations cpuinfo_op;
 
 extern void execve_tail(void);
+unsigned long vdso_text_size(void);
 unsigned long vdso_size(void);
 
 /*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 28017c418442..2f65bca2f3f1 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -66,6 +66,7 @@ int main(void)
 	OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
 	DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
 	BLANK();
+	OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
 	DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
 	OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
 	DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index b4485b0c7f06..640363b2a105 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -92,10 +92,16 @@ static inline bool ip_invalid(unsigned long ip)
 	return false;
 }
 
+static inline bool ip_within_vdso(unsigned long ip)
+{
+	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
 void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
 				 struct perf_callchain_entry_ctx *entry,
 				 const struct pt_regs *regs, bool perf)
 {
+	struct stack_frame_vdso_wrapper __user *sf_vdso;
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
 	bool first = true;
@@ -112,11 +118,25 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
 	while (1) {
 		if (__get_user(sp, &sf->back_chain))
 			break;
+		/*
+		 * VDSO entry code has a non-standard stack frame layout.
+		 * See VDSO user wrapper code for details.
+		 */
+		if (!sp && ip_within_vdso(ip)) {
+			sf_vdso = (void __user *)sf;
+			if (__get_user(ip, &sf_vdso->return_address))
+				break;
+			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+			sf = (void __user *)sp;
+			if (__get_user(sp, &sf->back_chain))
+				break;
+		} else {
+			sf = (void __user *)sp;
+			if (__get_user(ip, &sf->gprs[8]))
+				break;
+		}
 		/* Sanity check: ABI requires SP to be 8 byte aligned. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
-		if (__get_user(ip, &sf->gprs[8]))
+		if (sp & 0x7)
 			break;
 		if (ip_invalid(ip)) {
 			/*
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a45b3a4c91db..2f967ac2b8e3 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
 	return addr;
 }
 
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
 {
-	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+	unsigned long size;
 
 	if (is_compat_task())
-		size += vdso32_end - vdso32_start;
+		size = vdso32_end - vdso32_start;
 	else
-		size += vdso64_end - vdso64_start;
+		size = vdso64_end - vdso64_start;
 	return PAGE_ALIGN(size);
 }
 
+unsigned long vdso_size(void)
+{
+	return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	unsigned long addr = VDSO_BASE;
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index deee8ca9cdbf..e26e68675c08 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -23,6 +23,7 @@ __kernel_\func:
 	CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
 	stg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
 	CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+	xc	__SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
 	brasl	%r14,__s390_vdso_\func
 	lg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
 	CFI_RESTORE 14
--
cgit v1.2.3-59-g8ed1b
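The definition of struct stack_frame_vdso_wrapper is not part of this
file-limited view. Judging from the asm-offsets entries and the wrapper
code above, it appears to be a standard user frame, whose back chain the
wrapper now clears, followed by the saved r14. Under that assumption, the
skip taken by the new branch can be modelled as:

/* Standalone model of the vdso wrapper-frame skip. The wrapper struct
 * layout is an assumption inferred from the diffs above; the kernel
 * accesses all fields with __get_user(). */
#include <stdio.h>

struct stack_frame_user {
	unsigned long back_chain;
	unsigned long empty1[5];
	unsigned long gprs[10];
	unsigned long empty2[4];
};

/* Assumed layout: standard frame plus the r14 slot written by the
 * vdso user wrapper (__SFVDSO_RETURN_ADDRESS). */
struct stack_frame_vdso_wrapper {
	struct stack_frame_user sf;
	unsigned long return_address;
};

#define STACK_FRAME_VDSO_OVERHEAD sizeof(struct stack_frame_vdso_wrapper)

int main(void)
{
	/* Mock stack: the wrapper frame sits below its caller's frame. */
	static struct {
		struct stack_frame_vdso_wrapper vdso;	/* sp points here */
		struct stack_frame_user caller;
	} stack;
	struct stack_frame_user *sf = &stack.vdso.sf;
	unsigned long sp, ip = 0;

	stack.vdso.return_address = 0x400200UL;	/* r14 saved by the wrapper */

	sp = sf->back_chain;	/* 0: cleared by the wrapper code */
	if (!sp) {		/* kernel additionally checks ip_within_vdso(ip) */
		struct stack_frame_vdso_wrapper *sf_vdso = (void *)sf;

		ip = sf_vdso->return_address;
		sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
		sf = (struct stack_frame_user *)sp;
		sp = sf->back_chain;	/* continue the normal walk */
	}
	printf("recovered ip 0x%lx, next back chain 0x%lx\n", ip, sp);
	return 0;
}

Clearing the back chain in the wrapper (the xc instruction added above) is
what makes the !sp condition a reliable marker for this non-standard frame.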