arm64: prep stack walkers for THREAD_INFO_IN_TASK
Author: Mark Rutland <mark.rutland@arm.com>
Thu, 3 Nov 2016 20:23:08 +0000 (20:23 +0000)
Committer: Catalin Marinas <catalin.marinas@arm.com>
Fri, 11 Nov 2016 18:25:44 +0000 (18:25 +0000)
When CONFIG_THREAD_INFO_IN_TASK is selected, task stacks may be freed
before a task is destroyed. To account for this, the stacks are
refcounted, and when manipulating the stack of another task, it is
necessary to get/put the stack to ensure it isn't freed and/or re-used
while we do so.

This patch reworks the arm64 stack walking code to account for this.
When CONFIG_THREAD_INFO_IN_TASK is not selected, these operations perform
no refcounting, and this should only be a structural change that does not
affect behaviour.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Cc: AKASHI Takahiro <takahiro.akashi@linaro.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/process.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/traps.c

index 01753cd..ec7b9c0 100644 (file)
@@ -350,27 +350,35 @@ struct task_struct *__switch_to(struct task_struct *prev,
 unsigned long get_wchan(struct task_struct *p)
 {
        struct stackframe frame;
-       unsigned long stack_page;
+       unsigned long stack_page, ret = 0;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
 
+       stack_page = (unsigned long)try_get_task_stack(p);
+       if (!stack_page)
+               return 0;
+
        frame.fp = thread_saved_fp(p);
        frame.sp = thread_saved_sp(p);
        frame.pc = thread_saved_pc(p);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = p->curr_ret_stack;
 #endif
-       stack_page = (unsigned long)task_stack_page(p);
        do {
                if (frame.sp < stack_page ||
                    frame.sp >= stack_page + THREAD_SIZE ||
                    unwind_frame(p, &frame))
-                       return 0;
-               if (!in_sched_functions(frame.pc))
-                       return frame.pc;
+                       goto out;
+               if (!in_sched_functions(frame.pc)) {
+                       ret = frame.pc;
+                       goto out;
+               }
        } while (count ++ < 16);
-       return 0;
+
+out:
+       put_task_stack(p);
+       return ret;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
index d53f99d..8a552a3 100644 (file)
@@ -181,6 +181,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        struct stack_trace_data data;
        struct stackframe frame;
 
+       if (!try_get_task_stack(tsk))
+               return;
+
        data.trace = trace;
        data.skip = trace->skip;
 
@@ -202,6 +205,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        walk_stackframe(tsk, &frame, save_trace, &data);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+       put_task_stack(tsk);
 }
 
 void save_stack_trace(struct stack_trace *trace)
index 7ac30bf..4731133 100644 (file)
@@ -148,6 +148,9 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        if (!tsk)
                tsk = current;
 
+       if (!try_get_task_stack(tsk))
+               return;
+
        /*
         * Switching between stacks is valid when tracing current and in
         * non-preemptible context.
@@ -213,6 +216,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
                                 stack + sizeof(struct pt_regs));
                }
        }
+
+       put_task_stack(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)