powerpc: make stack walking KASAN-safe
author Daniel Axtens <dja@axtens.net>
Mon, 14 Jun 2021 12:09:07 +0000 (22:09 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Wed, 16 Jun 2021 14:09:11 +0000 (00:09 +1000)
Make our stack-walking code KASAN-safe by using __no_sanitize_address.
Generic code, arm64, s390 and x86 all make these accesses unchecked for
similar reasons: when unwinding a stack, we might touch memory that KASAN
has marked as out-of-bounds. In ppc64 KASAN development, I hit this
sometimes when checking for an exception frame, because we're reading an
arbitrary offset into the stack frame.

See commit 20955746320e ("s390/kasan: avoid false positives during stack
unwind"), commit bcaf669b4bdb ("arm64: disable kasan when accessing
frame->fp in unwind_frame"), commit 91e08ab0c851 ("x86/dumpstack:
Prevent KASAN false positive warnings") and commit 6e22c8366416
("tracing, kasan: Silence Kasan warning in check_stack of stack_tracer").

Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210614120907.1952321-1-dja@axtens.net
arch/powerpc/kernel/process.c
arch/powerpc/kernel/stacktrace.c
arch/powerpc/perf/callchain.c

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3626074..4e593fc 100644
@@ -2133,8 +2133,9 @@ unsigned long get_wchan(struct task_struct *p)
 
 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
 
-void show_stack(struct task_struct *tsk, unsigned long *stack,
-               const char *loglvl)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+                                     unsigned long *stack,
+                                     const char *loglvl)
 {
        unsigned long sp, ip, lr, newsp;
        int count = 0;
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 1deb1bf..1961e6d 100644
@@ -23,8 +23,8 @@
 
 #include <asm/paca.h>
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-                    struct task_struct *task, struct pt_regs *regs)
+void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+                                          struct task_struct *task, struct pt_regs *regs)
 {
        unsigned long sp;
 
@@ -61,8 +61,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
  *
  * If the task is not 'current', the caller *must* ensure the task is inactive.
  */
-int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
-                            void *cookie, struct task_struct *task)
+int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+                                                  void *cookie, struct task_struct *task)
 {
        unsigned long sp;
        unsigned long newsp;
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 6c028ee..082f6d0 100644
@@ -40,7 +40,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
        return 0;
 }
 
-void
+void __no_sanitize_address
 perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        unsigned long sp, next_sp;