author     Daniel Axtens <dja@axtens.net>          2021-06-14 22:09:07 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>   2021-06-17 00:09:11 +1000
commit     b112fb913b5b5705db22efa90ec60f42518934af (patch)
tree       78ce3f3f8d13289e02ee7fc9782ae210abb361c0
parent     selftests/powerpc: EBB selftest for MMCR0 control for PMU SPRs in ISA v3.1 (diff)
powerpc: make stack walking KASAN-safe
Make our stack-walking code KASAN-safe by using __no_sanitize_address. Generic code, arm64, s390 and x86 all make accesses unchecked for similar sorts of reasons: when unwinding a stack, we might touch memory that KASAN has marked as being out-of-bounds. In ppc64 KASAN development, I hit this sometimes when checking for an exception frame - because we're checking an arbitrary offset into the stack frame.

See commit 20955746320e ("s390/kasan: avoid false positives during stack unwind"), commit bcaf669b4bdb ("arm64: disable kasan when accessing frame->fp in unwind_frame"), commit 91e08ab0c851 ("x86/dumpstack: Prevent KASAN false positive warnings") and commit 6e22c8366416 ("tracing, kasan: Silence Kasan warning in check_stack of stack_tracer").

Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210614120907.1952321-1-dja@axtens.net
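For context, a minimal sketch of the pattern this patch applies (not the kernel's actual code: the function name walk_stack_frames and the frame offsets below are illustrative; only the __no_sanitize_address attribute is taken from the patch). Marking a stack walker this way means KASAN does not instrument its loads, so probing an arbitrary offset into a stack frame cannot raise a false positive:

    /*
     * Illustrative sketch only - the function name and offsets are
     * hypothetical; __no_sanitize_address is the real attribute used
     * by the patch to suppress KASAN instrumentation in this function.
     */
    static void __no_sanitize_address walk_stack_frames(unsigned long sp)
    {
    	while (sp) {
    		unsigned long *frame = (unsigned long *)sp;

    		/*
    		 * Checking a fixed offset into the frame (e.g. looking
    		 * for an exception-frame marker) can read memory KASAN
    		 * has poisoned; because this function is not
    		 * instrumented, that read cannot trigger a report.
    		 */
    		unsigned long maybe_marker = frame[12];
    		(void)maybe_marker;

    		/* follow the saved back-chain word to the caller's frame */
    		sp = frame[0];
    	}
    }

As the commit message notes, other architectures solve the same problem either by annotating their unwinders like this or by making the individual accesses unchecked.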
-rw-r--r--  arch/powerpc/kernel/process.c     | 5
-rw-r--r--  arch/powerpc/kernel/stacktrace.c  | 8
-rw-r--r--  arch/powerpc/perf/callchain.c     | 2
3 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3626074acec9..4e593fc2c66d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -2133,8 +2133,9 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack,
- const char *loglvl)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+ unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 1deb1bf331dd..1961e6d5e33b 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -23,8 +23,8 @@
#include <asm/paca.h>
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
- struct task_struct *task, struct pt_regs *regs)
+void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
unsigned long sp;
@@ -61,8 +61,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
-int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
- void *cookie, struct task_struct *task)
+int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
{
unsigned long sp;
unsigned long newsp;
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 6c028ee513c0..082f6d0308a4 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -40,7 +40,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0;
}
-void
+void __no_sanitize_address
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;