Diffstat (limited to 'arch/arm64/kernel/stacktrace.c')
-rw-r--r-- | arch/arm64/kernel/stacktrace.c | 257
1 file changed, 116 insertions, 141 deletions
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 94f83cd44e50..634279b3b03d 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,53 +7,64 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
-#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
-#include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
 /*
- * AArch64 PCS assigns the frame pointer to x29.
+ * Start an unwind from a pt_regs.
  *
- * A simple function prologue looks like this:
- *	sub	sp, sp, #0x10
- *	stp	x29, x30, [sp]
- *	mov	x29, sp
+ * The unwind will begin at the PC within the regs.
  *
- * A simple function epilogue looks like this:
- *	mov	sp, x29
- *	ldp	x29, x30, [sp]
- *	add	sp, sp, #0x10
+ * The regs must be on a stack currently owned by the calling task.
  */
+static inline void unwind_init_from_regs(struct unwind_state *state,
+					 struct pt_regs *regs)
+{
+	unwind_init_common(state, current);
+
+	state->fp = regs->regs[29];
+	state->pc = regs->pc;
+}
 
-void start_backtrace(struct stackframe *frame, unsigned long fp,
-		     unsigned long pc)
+/*
+ * Start an unwind from a caller.
+ *
+ * The unwind will begin at the caller of whichever function this is inlined
+ * into.
+ *
+ * The function which invokes this must be noinline.
+ */
+static __always_inline void unwind_init_from_caller(struct unwind_state *state)
 {
-	frame->fp = fp;
-	frame->pc = pc;
-#ifdef CONFIG_KRETPROBES
-	frame->kr_cur = NULL;
-#endif
+	unwind_init_common(state, current);
+
+	state->fp = (unsigned long)__builtin_frame_address(1);
+	state->pc = (unsigned long)__builtin_return_address(0);
+}
+
+/*
+ * Start an unwind from a blocked task.
+ *
+ * The unwind will begin at the blocked tasks saved PC (i.e. the caller of
+ * cpu_switch_to()).
+ *
+ * The caller should ensure the task is blocked in cpu_switch_to() for the
+ * duration of the unwind, or the unwind will be bogus. It is never valid to
+ * call this for the current task.
+ */
+static inline void unwind_init_from_task(struct unwind_state *state,
+					 struct task_struct *task)
+{
+	unwind_init_common(state, task);
 
-	/*
-	 * Prime the first unwind.
-	 *
-	 * In unwind_frame() we'll check that the FP points to a valid stack,
-	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
-	 * treated as a transition to whichever stack that happens to be. The
-	 * prev_fp value won't be used, but we set it to 0 such that it is
-	 * definitely not an accessible stack address.
-	 */
-	bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
-	frame->prev_fp = 0;
-	frame->prev_type = STACK_TYPE_UNKNOWN;
+	state->fp = thread_saved_fp(task);
+	state->pc = thread_saved_pc(task);
 }
 
 /*
@@ -63,61 +74,25 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+static int notrace unwind_next(struct unwind_state *state)
 {
-	unsigned long fp = frame->fp;
-	struct stack_info info;
-
-	if (!tsk)
-		tsk = current;
+	struct task_struct *tsk = state->task;
+	unsigned long fp = state->fp;
+	int err;
 
 	/* Final frame; nothing to unwind */
 	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
 		return -ENOENT;
 
-	if (fp & 0x7)
-		return -EINVAL;
-
-	if (!on_accessible_stack(tsk, fp, 16, &info))
-		return -EINVAL;
-
-	if (test_bit(info.type, frame->stacks_done))
-		return -EINVAL;
-
-	/*
-	 * As stacks grow downward, any valid record on the same stack must be
-	 * at a strictly higher address than the prior record.
-	 *
-	 * Stacks can nest in several valid orders, e.g.
-	 *
-	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
-	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
-	 *
-	 * ... but the nesting itself is strict. Once we transition from one
-	 * stack to another, it's never valid to unwind back to that first
-	 * stack.
-	 */
-	if (info.type == frame->prev_type) {
-		if (fp <= frame->prev_fp)
-			return -EINVAL;
-	} else {
-		set_bit(frame->prev_type, frame->stacks_done);
-	}
-
-	/*
-	 * Record this frame record's values and location. The prev_fp and
-	 * prev_type are only meaningful to the next unwind_frame() invocation.
-	 */
-	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
-	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
-	frame->prev_fp = fp;
-	frame->prev_type = info.type;
+	err = unwind_next_frame_record(state);
+	if (err)
+		return err;
 
-	frame->pc = ptrauth_strip_insn_pac(frame->pc);
+	state->pc = ptrauth_strip_insn_pac(state->pc);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
-	    (frame->pc == (unsigned long)return_to_handler)) {
+	    (state->pc == (unsigned long)return_to_handler)) {
 		unsigned long orig_pc;
 		/*
 		 * This is a case where function graph tracer has
@@ -125,55 +100,51 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 		 * to hook a function return.
 		 * So replace it to an original value.
 		 */
-		orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
-						(void *)frame->fp);
-		if (WARN_ON_ONCE(frame->pc == orig_pc))
+		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
+						(void *)state->fp);
+		if (WARN_ON_ONCE(state->pc == orig_pc))
 			return -EINVAL;
-		frame->pc = orig_pc;
+		state->pc = orig_pc;
 	}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #ifdef CONFIG_KRETPROBES
-	if (is_kretprobe_trampoline(frame->pc))
-		frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
+	if (is_kretprobe_trampoline(state->pc))
+		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
 #endif
 
 	return 0;
 }
-NOKPROBE_SYMBOL(unwind_frame);
+NOKPROBE_SYMBOL(unwind_next);
 
-void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
-			     bool (*fn)(void *, unsigned long), void *data)
+static void notrace unwind(struct unwind_state *state,
+			   stack_trace_consume_fn consume_entry, void *cookie)
 {
 	while (1) {
 		int ret;
 
-		if (!fn(data, frame->pc))
+		if (!consume_entry(cookie, state->pc))
 			break;
-		ret = unwind_frame(tsk, frame);
+		ret = unwind_next(state);
 		if (ret < 0)
 			break;
 	}
 }
-NOKPROBE_SYMBOL(walk_stackframe);
+NOKPROBE_SYMBOL(unwind);
 
-static void dump_backtrace_entry(unsigned long where, const char *loglvl)
+static bool dump_backtrace_entry(void *arg, unsigned long where)
 {
+	char *loglvl = arg;
 	printk("%s %pSb\n", loglvl, (void *)where);
+	return true;
 }
 
 void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 		    const char *loglvl)
 {
-	struct stackframe frame;
-	int skip = 0;
-
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
-	if (regs) {
-		if (user_mode(regs))
-			return;
-		skip = 1;
-	}
+	if (regs && user_mode(regs))
+		return;
 
 	if (!tsk)
 		tsk = current;
@@ -181,36 +152,8 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 	if (!try_get_task_stack(tsk))
 		return;
 
-	if (tsk == current) {
-		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)dump_backtrace);
-	} else {
-		/*
-		 * task blocked in __switch_to
-		 */
-		start_backtrace(&frame,
-				thread_saved_fp(tsk),
-				thread_saved_pc(tsk));
-	}
-
 	printk("%sCall trace:\n", loglvl);
-	do {
-		/* skip until specified stack frame */
-		if (!skip) {
-			dump_backtrace_entry(frame.pc, loglvl);
-		} else if (frame.fp == regs->regs[29]) {
-			skip = 0;
-			/*
-			 * Mostly, this is the case where this function is
-			 * called in panic/abort. As exception handler's
-			 * stack frame does not contain the corresponding pc
-			 * at which an exception has taken place, use regs->pc
-			 * instead.
-			 */
-			dump_backtrace_entry(regs->pc, loglvl);
-		}
-	} while (!unwind_frame(tsk, &frame));
+	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);
 
 	put_task_stack(tsk);
 }
@@ -221,25 +164,57 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 	barrier();
 }
 
-#ifdef CONFIG_STACKTRACE
+/*
+ * Per-cpu stacks are only accessible when unwinding the current task in a
+ * non-preemptible context.
+ */
+#define STACKINFO_CPU(name)				\
+	({						\
+		((task == current) && !preemptible())	\
+			? stackinfo_get_##name()	\
+			: stackinfo_get_unknown();	\
+	})
+
+/*
+ * SDEI stacks are only accessible when unwinding the current task in an NMI
+ * context.
+ */
+#define STACKINFO_SDEI(name)				\
+	({						\
+		((task == current) && in_nmi())		\
+			? stackinfo_get_sdei_##name()	\
+			: stackinfo_get_unknown();	\
+	})
 
 noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
 			      void *cookie, struct task_struct *task,
 			      struct pt_regs *regs)
 {
-	struct stackframe frame;
-
-	if (regs)
-		start_backtrace(&frame, regs->regs[29], regs->pc);
-	else if (task == current)
-		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(1),
-				(unsigned long)__builtin_return_address(0));
-	else
-		start_backtrace(&frame, thread_saved_fp(task),
-				thread_saved_pc(task));
-
-	walk_stackframe(task, &frame, consume_entry, cookie);
-}
-
+	struct stack_info stacks[] = {
+		stackinfo_get_task(task),
+		STACKINFO_CPU(irq),
+#if defined(CONFIG_VMAP_STACK)
+		STACKINFO_CPU(overflow),
+#endif
+#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
+		STACKINFO_SDEI(normal),
+		STACKINFO_SDEI(critical),
 #endif
+	};
+	struct unwind_state state = {
+		.stacks = stacks,
+		.nr_stacks = ARRAY_SIZE(stacks),
+	};
+
+	if (regs) {
+		if (task != current)
+			return;
+		unwind_init_from_regs(&state, regs);
+	} else if (task == current) {
+		unwind_init_from_caller(&state);
+	} else {
+		unwind_init_from_task(&state, task);
+	}
+
+	unwind(&state, consume_entry, cookie);
+}
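
A note for readers following the refactor: after this change, every kernel unwind on arm64 funnels through arch_stack_walk(), which selects one of the three unwind_init_from_*() entry points and then drives unwind() with a stack_trace_consume_fn callback. Below is a minimal sketch of a caller, assuming kernel context; print_pc() and show_my_stack() are illustrative names, not part of the patch.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Illustrative consumer: print each unwound PC; returning false stops the walk. */
static bool print_pc(void *cookie, unsigned long pc)
{
	pr_info("%s %pS\n", (const char *)cookie, (void *)pc);
	return true;
}

static void show_my_stack(void)
{
	/*
	 * With regs == NULL and task == current, arch_stack_walk() takes the
	 * unwind_init_from_caller() path, so the trace begins at the caller
	 * of arch_stack_walk(), i.e. show_my_stack() itself.
	 */
	arch_stack_walk(print_pc, (void *)"example:", current, NULL);
}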
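The STACKINFO_* helpers are what make remote and preemptible unwinds safe: when arch_stack_walk() builds its stacks[] array, any per-cpu stack the caller cannot legitimately observe is recorded as unknown. Roughly, STACKINFO_CPU(irq) expands at its use site to the expression below (a sketch of the macro above, not new behaviour):

	/* Approximate expansion of STACKINFO_CPU(irq) in arch_stack_walk(): */
	((task == current) && !preemptible())
		? stackinfo_get_irq()
		: stackinfo_get_unknown()

Since stackinfo_get_unknown() describes an empty address range, a frame record pointing at the IRQ stack during a remote or preemptible unwind fails the unwinder's bounds checks like any other bogus fp, which preserves the strict stack-nesting guarantee.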