/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
        STACK_TYPE_UNKNOWN,
        STACK_TYPE_TASK,
        STACK_TYPE_IRQ,
        STACK_TYPE_OVERFLOW,
        STACK_TYPE_SDEI_NORMAL,
        STACK_TYPE_SDEI_CRITICAL,
        __NR_STACK_TYPES
};

struct stack_info {
        unsigned long low;
        unsigned long high;
        enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
 *               replacement lr value in the ftrace graph stack.
 */
struct stackframe {
        unsigned long fp;
        unsigned long pc;
        DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
        unsigned long prev_fp;
        enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int graph;
#endif
};
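
/*
 * Illustrative sketch (not part of this header): before the first unwind
 * step, the accounting fields above are expected to be seeded roughly the
 * way start_backtrace() does, so that a fresh walk starts from a clean
 * state:
 *
 *	frame->fp = fp;
 *	frame->pc = pc;
 *	bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
 *	frame->prev_fp = 0;
 *	frame->prev_type = STACK_TYPE_UNKNOWN;
 */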

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                            bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                           const char *loglvl);

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_stack(unsigned long sp, unsigned long size,
                            unsigned long low, unsigned long high,
                            enum stack_type type, struct stack_info *info)
{
        if (!low)
                return false;

        if (sp < low || sp + size < sp || sp + size > high)
                return false;

        if (info) {
                info->low = low;
                info->high = high;
                info->type = type;
        }
        return true;
}
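
/*
 * Example (illustrative values only): with low == 0x1000 and high == 0x2000,
 * an object that ends exactly at high is accepted, while anything past it,
 * or wrapping around the address space, is rejected:
 *
 *	on_stack(0x1ff0, 16, 0x1000, 0x2000, STACK_TYPE_TASK, NULL); // true
 *	on_stack(0x1ff8, 16, 0x1000, 0x2000, STACK_TYPE_TASK, NULL); // false
 *	on_stack(~0UL,   16, 0x1000, 0x2000, STACK_TYPE_TASK, NULL); // false (sp + size wraps)
 */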

static inline bool on_irq_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
{
        unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
        unsigned long high = low + IRQ_STACK_SIZE;

        return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

static inline bool on_task_stack(const struct task_struct *tsk,
                                 unsigned long sp, unsigned long size,
                                 struct stack_info *info)
{
        unsigned long low = (unsigned long)task_stack_page(tsk);
        unsigned long high = low + THREAD_SIZE;

        return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                                     struct stack_info *info)
{
        unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;

        return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                                     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
                                       unsigned long sp, unsigned long size,
                                       struct stack_info *info)
{
        if (info)
                info->type = STACK_TYPE_UNKNOWN;

        if (on_task_stack(tsk, sp, size, info))
                return true;
        if (tsk != current || preemptible())
                return false;
        if (on_irq_stack(sp, size, info))
                return true;
        if (on_overflow_stack(sp, size, info))
                return true;
        if (on_sdei_stack(sp, size, info))
                return true;

        return false;
}
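
/*
 * Illustrative sketch (assumed caller, not defined here): an unwinder is
 * expected to validate a candidate frame record with this helper before
 * dereferencing it, along the lines of:
 *
 *	struct stack_info info;
 *
 *	if (!on_accessible_stack(tsk, frame->fp, 16, &info))
 *		return -EINVAL;
 *	// the 16-byte fp/lr record at frame->fp is now known to lie on a
 *	// stack this task may legitimately be using
 */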

void start_backtrace(struct stackframe *frame, unsigned long fp,
                     unsigned long pc);
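
/*
 * Illustrative usage (a sketch, not part of this header): walking the
 * current task's stack with the helpers declared above. The callback name,
 * the pr_info() reporting, and show_my_stack() itself are assumptions made
 * for the example only. Returning true from the callback keeps unwinding.
 *
 *	static bool report_pc(void *data, unsigned long pc)
 *	{
 *		pr_info("%pS\n", (void *)pc);
 *		return true;
 *	}
 *
 *	static void show_my_stack(void)
 *	{
 *		struct stackframe frame;
 *
 *		start_backtrace(&frame,
 *				(unsigned long)__builtin_frame_address(0),
 *				(unsigned long)show_my_stack);
 *		walk_stackframe(current, &frame, report_pc, NULL);
 *	}
 */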

#endif	/* __ASM_STACKTRACE_H */