1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Common arm64 stack unwinder code.
5 * To implement a new arm64 stack unwinder:
6 * 1) Include this header
8 * 2) Provide implementations for the following functions:
9 * on_overflow_stack(): Returns true if SP is on the overflow
 *    on_accessible_stack(): Returns true if SP is on any accessible
13 * unwind_next(): Performs validation checks on the frame
14 * pointer, and transitions unwind_state
17 * See: arch/arm64/include/asm/stacktrace.h for reference
20 * Copyright (C) 2012 ARM Ltd.
22 #ifndef __ASM_STACKTRACE_COMMON_H
23 #define __ASM_STACKTRACE_COMMON_H
25 #include <linux/bitmap.h>
26 #include <linux/bitops.h>
27 #include <linux/kprobes.h>
28 #include <linux/types.h>
	/*
	 * Presumably the SDEI (Software Delegated Exception Interface)
	 * event stacks, one per priority level -- NOTE(review): enum
	 * header is elided in this view; confirm against the full file.
	 */
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, for which it is
 *               no longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic
 *               value of 0. This is used to ensure that within a stack,
 *               each subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement
 *               lr value.
 *
 * @task:        The task being unwound.
 */
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
	struct task_struct *task;
/*
 * on_stack() - check whether an object lies entirely within one stack.
 *
 * @sp:   lowest address of the object being checked.
 * @size: size of the object in bytes.
 * @low:  lowest (inclusive) address of the candidate stack range.
 * @high: highest (exclusive) address of the candidate stack range.
 * @type: stack type tag, presumably recorded into @info on a hit --
 *        NOTE(review): the recording code is elided in this view.
 * @info: out-parameter describing the matched stack.
 */
static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
	/*
	 * Reject when sp falls below the stack base, when sp + size wraps
	 * around (unsigned overflow check), or when the object runs past
	 * the top of the range.
	 */
	if (sp < low || sp + size < sp || sp + size > high)
/*
 * unwind_init_common() - initialisation shared by all arm64 unwinders.
 *
 * @state: unwind state to initialise for a fresh unwind.
 * @task:  the task being unwound -- NOTE(review): the state->task
 *         assignment is elided in this view; confirm in the full file.
 */
static inline void unwind_init_common(struct unwind_state *state,
				      struct task_struct *task)
#ifdef CONFIG_KRETPROBES
	/* No kretprobe-replaced lr has been encountered yet. */
	state->kr_cur = NULL;

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_next() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
	state->prev_type = STACK_TYPE_UNKNOWN;
/**
 * stack_trace_translate_fp_fn() - Translates a non-kernel frame pointer to
 * a kernel-accessible address.
 *
 * @fp:   the frame pointer to be updated to its kernel address.
 * @type: the stack type associated with frame pointer @fp.
 *
 * Return: true on success, with @fp updated to the corresponding
 * kernel virtual address; otherwise returns false.
 */
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp,
					    enum stack_type type);
/**
 * on_accessible_stack_fn() - Check whether a stack range is on any
 * of the possible stacks.
 *
 * @tsk:  task whose stack is being unwound.
 * @sp:   stack address being checked.
 * @size: size of the stack range being checked.
 * @info: stack unwinding context, presumably filled in on a successful
 *        check -- NOTE(review): confirm against the concrete unwinders
 *        that implement this callback.
 */
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info);
/*
 * unwind_next_common() - shared validation for advancing the unwind by
 * one frame record.
 *
 * @state:        current unwind state; fp/pc are advanced on success.
 * @info:         receives the type of the stack that @state->fp lies on.
 * @accessible:   unwinder-specific callback validating a stack range.
 * @translate_fp: optional callback translating a non-kernel fp before it
 *                is dereferenced; may be NULL when no translation applies.
 *
 * Return: 0 on success, a negative error code otherwise -- NOTE(review):
 * the error-return statements are elided in this view of the file.
 */
static inline int unwind_next_common(struct unwind_state *state,
				     struct stack_info *info,
				     on_accessible_stack_fn accessible,
				     stack_trace_translate_fp_fn translate_fp)
	unsigned long fp = state->fp, kern_fp = fp;
	struct task_struct *tsk = state->task;

	/* A frame record is a 16-byte fp/lr pair; it must be accessible. */
	if (!accessible(tsk, fp, 16, info))

	/* Never unwind back onto a stack we have already finished. */
	if (test_bit(info->type, state->stacks_done))

	/*
	 * If fp is not from the current address space perform the necessary
	 * translation before dereferencing it to get the next fp.
	 */
	if (translate_fp && !translate_fp(&kern_fp, info->type))

	/*
	 * As stacks grow downward, any valid record on the same stack must
	 * be at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to the stack we
	 * transitioned from.
	 */
	if (info->type == state->prev_type) {
		if (fp <= state->prev_fp)
		__set_bit(state->prev_type, state->stacks_done);

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_next() invocation.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
	state->prev_type = info->type;
205 #endif /* __ASM_STACKTRACE_COMMON_H */