author     Will Deacon <will@kernel.org>    2022-07-25 10:57:26 +0100
committer  Will Deacon <will@kernel.org>    2022-07-25 10:57:26 +0100
commit     cb20311e5ee9118043f85edb5c036792b987f8d0 (patch)
tree       76421bcd43fd010ed76a0c8a887a6477d8e382d5 /arch/arm64/kernel
parent     Merge branch 'for-next/sme' into for-next/core (diff)
parent     arm64: Copy the task argument to unwind_state (diff)
Merge branch 'for-next/stacktrace' into for-next/core
* for-next/stacktrace:
  arm64: Copy the task argument to unwind_state
  arm64: Split unwind_init()
  arm64: stacktrace: use non-atomic __set_bit
  arm64: kasan: do not instrument stacktrace.c
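The common thread in the first two patches is that the per-step unwind code no longer takes the task as a separate argument: the task is copied into unwind_state once at init time, and each unwind source gets its own named initialiser. A minimal, self-contained model of that pattern (toy types only, not the kernel code, which appears in full in the diff below):

/* Toy model of the "store the task in the state" refactor. */
struct task;                            /* stand-in for struct task_struct */

struct unwind_state {
        unsigned long fp;
        unsigned long pc;
        struct task *task;              /* new: the task being unwound */
        /* prev_fp, prev_type, stacks_done, kr_cur, ... elided */
};

/* Shared setup: record the task once; fp/pc come from the per-source init. */
static void unwind_init_common(struct unwind_state *state, struct task *task)
{
        state->task = task;
        state->fp = 0;
        state->pc = 0;
}

/* Each step now finds the task in the state instead of taking it again. */
static int unwind_next(struct unwind_state *state)
{
        struct task *tsk = state->task; /* used for per-task stack bounds */

        (void)tsk;
        return -1;                      /* toy: nothing more to unwind */
}

The payoff, visible at the bottom of the diff, is that unwind() and unwind_next() collapse to single-argument calls, and arch_stack_walk() picks one of three explicit initialisers instead of open-coding the fp/pc selection.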
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile      |  5
-rw-r--r--  arch/arm64/kernel/stacktrace.c  | 99
2 files changed, 80 insertions(+), 24 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index fa7981d0d917..7075a9c6a4a6 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
CFLAGS_syscall.o += -fno-stack-protector
+# When KASAN is enabled, a stack trace is recorded for every alloc/free, which
+# can significantly impact performance. Avoid instrumenting the stack trace
+# collection code to minimize this impact.
+KASAN_SANITIZE_stacktrace.o := n
+
# It's not safe to invoke KCOV when portions of the kernel environment aren't
# available or are out-of-sync with HW state. Since `noinstr` doesn't always
# inhibit KCOV instrumentation, disable it for the entire compilation unit.
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 0467cb79f080..fcaa151b81f1 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -38,6 +38,8 @@
* @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
* associated with the most recently encountered replacement lr
* value.
+ *
+ * @task: The task being unwound.
*/
struct unwind_state {
unsigned long fp;
@@ -48,13 +50,13 @@ struct unwind_state {
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
#endif
+ struct task_struct *task;
};
-static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
- unsigned long pc)
+static void unwind_init_common(struct unwind_state *state,
+ struct task_struct *task)
{
- state->fp = fp;
- state->pc = pc;
+ state->task = task;
#ifdef CONFIG_KRETPROBES
state->kr_cur = NULL;
#endif
@@ -72,7 +74,57 @@ static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
state->prev_fp = 0;
state->prev_type = STACK_TYPE_UNKNOWN;
}
-NOKPROBE_SYMBOL(unwind_init);
+
+/*
+ * Start an unwind from a pt_regs.
+ *
+ * The unwind will begin at the PC within the regs.
+ *
+ * The regs must be on a stack currently owned by the calling task.
+ */
+static inline void unwind_init_from_regs(struct unwind_state *state,
+ struct pt_regs *regs)
+{
+ unwind_init_common(state, current);
+
+ state->fp = regs->regs[29];
+ state->pc = regs->pc;
+}
+
+/*
+ * Start an unwind from a caller.
+ *
+ * The unwind will begin at the caller of whichever function this is inlined
+ * into.
+ *
+ * The function which invokes this must be noinline.
+ */
+static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+{
+ unwind_init_common(state, current);
+
+ state->fp = (unsigned long)__builtin_frame_address(1);
+ state->pc = (unsigned long)__builtin_return_address(0);
+}
+
+/*
+ * Start an unwind from a blocked task.
+ *
+ * The unwind will begin at the blocked task's saved PC (i.e. the caller of
+ * cpu_switch_to()).
+ *
+ * The caller should ensure the task is blocked in cpu_switch_to() for the
+ * duration of the unwind, or the unwind will be bogus. It is never valid to
+ * call this for the current task.
+ */
+static inline void unwind_init_from_task(struct unwind_state *state,
+ struct task_struct *task)
+{
+ unwind_init_common(state, task);
+
+ state->fp = thread_saved_fp(task);
+ state->pc = thread_saved_pc(task);
+}
/*
* Unwind from one frame record (A) to the next frame record (B).
@@ -81,9 +133,9 @@ NOKPROBE_SYMBOL(unwind_init);
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
-static int notrace unwind_next(struct task_struct *tsk,
- struct unwind_state *state)
+static int notrace unwind_next(struct unwind_state *state)
{
+ struct task_struct *tsk = state->task;
unsigned long fp = state->fp;
struct stack_info info;
@@ -117,15 +169,15 @@ static int notrace unwind_next(struct task_struct *tsk,
if (fp <= state->prev_fp)
return -EINVAL;
} else {
- set_bit(state->prev_type, state->stacks_done);
+ __set_bit(state->prev_type, state->stacks_done);
}
/*
* Record this frame record's values and location. The prev_fp and
* prev_type are only meaningful to the next unwind_next() invocation.
*/
- state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
- state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ state->fp = READ_ONCE(*(unsigned long *)(fp));
+ state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
state->prev_fp = fp;
state->prev_type = info.type;
@@ -157,8 +209,7 @@ static int notrace unwind_next(struct task_struct *tsk,
}
NOKPROBE_SYMBOL(unwind_next);
-static void notrace unwind(struct task_struct *tsk,
- struct unwind_state *state,
+static void notrace unwind(struct unwind_state *state,
stack_trace_consume_fn consume_entry, void *cookie)
{
while (1) {
@@ -166,7 +217,7 @@ static void notrace unwind(struct task_struct *tsk,
if (!consume_entry(cookie, state->pc))
break;
- ret = unwind_next(tsk, state);
+ ret = unwind_next(state);
if (ret < 0)
break;
}
@@ -212,15 +263,15 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
{
struct unwind_state state;
- if (regs)
- unwind_init(&state, regs->regs[29], regs->pc);
- else if (task == current)
- unwind_init(&state,
- (unsigned long)__builtin_frame_address(1),
- (unsigned long)__builtin_return_address(0));
- else
- unwind_init(&state, thread_saved_fp(task),
- thread_saved_pc(task));
-
- unwind(task, &state, consume_entry, cookie);
+ if (regs) {
+ if (task != current)
+ return;
+ unwind_init_from_regs(&state, regs);
+ } else if (task == current) {
+ unwind_init_from_caller(&state);
+ } else {
+ unwind_init_from_task(&state, task);
+ }
+
+ unwind(&state, consume_entry, cookie);
}
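
For reference, a minimal sketch of how the reworked arch_stack_walk() can be driven through the generic CONFIG_ARCH_STACKWALK interface. collect_cookie, collect_entry() and collect_current_stack() are hypothetical names used purely for illustration; in-tree users normally go through stack_trace_save() and friends rather than calling the arch hook directly.

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/types.h>

/* Hypothetical cookie: a caller-provided buffer for return addresses. */
struct collect_cookie {
        unsigned long *entries;
        unsigned int nr;
        unsigned int max;
};

/* Consume callback: store each PC; returning false stops the unwind early. */
static bool collect_entry(void *cookie, unsigned long pc)
{
        struct collect_cookie *c = cookie;

        if (c->nr >= c->max)
                return false;
        c->entries[c->nr++] = pc;
        return true;
}

/* Unwind the current task, starting from the caller of arch_stack_walk(). */
static unsigned int collect_current_stack(unsigned long *entries,
                                          unsigned int max)
{
        struct collect_cookie c = { .entries = entries, .max = max };

        /* task == current, regs == NULL: the unwind_init_from_caller() path. */
        arch_stack_walk(collect_entry, &c, current, NULL);
        return c.nr;
}

A false return from the consume callback is also how the while (1) loop in unwind() terminates early, so the buffer bound above doubles as a depth limit.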