// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct fregs_offset {
	const char *name;
	int offset;
};

#define FREGS_OFFSET(n, field)				\
{							\
	.name = n,					\
	.offset = offsetof(struct ftrace_regs, field),	\
}

static const struct fregs_offset fregs_offsets[] = {
33 FREGS_OFFSET("x0", regs[0]),
34 FREGS_OFFSET("x1", regs[1]),
35 FREGS_OFFSET("x2", regs[2]),
36 FREGS_OFFSET("x3", regs[3]),
37 FREGS_OFFSET("x4", regs[4]),
38 FREGS_OFFSET("x5", regs[5]),
39 FREGS_OFFSET("x6", regs[6]),
40 FREGS_OFFSET("x7", regs[7]),
41 FREGS_OFFSET("x8", regs[8]),
43 FREGS_OFFSET("x29", fp),
44 FREGS_OFFSET("x30", lr),
45 FREGS_OFFSET("lr", lr),
47 FREGS_OFFSET("sp", sp),
48 FREGS_OFFSET("pc", pc),
int ftrace_regs_query_register_offset(const char *name)
{
	for (int i = 0; i < ARRAY_SIZE(fregs_offsets); i++) {
		const struct fregs_offset *roff = &fregs_offsets[i];

		if (!strcmp(roff->name, name))
			return roff->offset;
	}

	return -EINVAL;
}
#endif

unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * When using mcount, addr is the address of the mcount call
	 * instruction, and no adjustment is necessary.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return addr;

	/*
	 * When using patchable-function-entry without pre-function NOPs, addr
	 * is the address of the first NOP after the function entry point.
	 *
	 * The compiler has either generated:
	 *
	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+04:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * addr-04:		BTI	C
	 * addr+00:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+04:		NOP		// To be patched to BL <caller>
	 *
	 * We must adjust addr to the address of the NOP which will be patched
	 * to `BL <caller>`, which is at `addr + 4` bytes in either case.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return addr + AARCH64_INSN_SIZE;

	/*
	 * When using patchable-function-entry with pre-function NOPs, addr is
	 * the address of the first pre-function NOP.
	 *
	 * Starting from an 8-byte aligned base, the compiler has either
	 * generated:
	 *
	 * addr+00:		NOP		// Literal (first 32 bits)
	 * addr+04:		NOP		// Literal (last 32 bits)
	 * addr+08:	func:	NOP		// To be patched to MOV X9, LR
	 * addr+12:		NOP		// To be patched to BL <caller>
	 *
	 * Or:
	 *
	 * addr+00:		NOP		// Literal (first 32 bits)
	 * addr+04:		NOP		// Literal (last 32 bits)
	 * addr+08:	func:	BTI	C
	 * addr+12:		NOP		// To be patched to MOV X9, LR
	 * addr+16:		NOP		// To be patched to BL <caller>
	 *
	 * We must adjust addr to the address of the NOP which will be patched
	 * to `BL <caller>`, which is at either addr+12 or addr+16 depending on
	 * whether there is a BTI.
	 */
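
	/*
	 * Note: these layouts correspond to building with
	 * -fpatchable-function-entry=2 (no pre-function NOPs) or
	 * -fpatchable-function-entry=4,2 (two pre-function NOPs), as
	 * selected in arch/arm64/Makefile.
	 */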
	if (!IS_ALIGNED(addr, sizeof(unsigned long))) {
		WARN_RATELIMIT(1, "Misaligned patch-site %pS\n",
			       (void *)(addr + 8));
		return 0;
	}

	/* Skip the NOPs placed before the function entry point */
	addr += 2 * AARCH64_INSN_SIZE;

	/* Skip any BTI */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) {
		u32 insn = le32_to_cpu(*(__le32 *)addr);

		if (aarch64_insn_is_bti(insn)) {
			addr += AARCH64_INSN_SIZE;
		} else if (insn != aarch64_insn_gen_nop()) {
			WARN_RATELIMIT(1, "unexpected insn in patch-site %pS: 0x%08x\n",
				       (void *)addr, insn);
		}
	}

	/* Skip the first NOP after function entry */
	addr += AARCH64_INSN_SIZE;

	return addr;
}

/*
 * Replace a single instruction, which may be a branch or NOP.
 * If @validate == true, the instruction being replaced is checked against
 * 'old'.
 */
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new,
			      bool validate)
{
	u32 replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with aarch64_insn_*() which uses
	 * probe_kernel_*(), and make sure what we read is what we expected it
	 * to be before modifying it.
	 */
	if (validate) {
		if (aarch64_insn_read((void *)pc, &replaced))
			return -EFAULT;

		if (replaced != old)
			return -EINVAL;
	}
	if (aarch64_insn_patch_text_nosync((void *)pc, new))
		return -EPERM;

	return 0;
}
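
/*
 * Example (a sketch mirroring ftrace_make_nop() below): to turn a
 * previously patched branch back into a NOP while verifying nothing
 * else has touched the text:
 *
 *	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
 *	new = aarch64_insn_gen_nop();
 *	ret = ftrace_modify_code(pc, old, new, true);
 *
 * With validate == true, this returns -EINVAL if the instruction at 'pc'
 * is not 'old'.
 */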

/*
 * Replace the tracer function called from ftrace_caller()
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc;
	u32 new;

	/*
	 * When using CALL_OPS, the function to call is associated with the
	 * call site, and we don't have a global function pointer to update.
	 */
	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
		return 0;

	pc = (unsigned long)ftrace_call;
	new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
					  AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, 0, new, false);
}

static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
#ifdef CONFIG_ARM64_MODULE_PLTS
	struct plt_entry *plt = mod->arch.ftrace_trampolines;

	if (addr == FTRACE_ADDR)
		return &plt[FTRACE_PLT_IDX];
#endif
	return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of 'BL' instructions, modules may be placed too far
 * away to branch directly and must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has been
 * modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
				      struct module *mod,
				      unsigned long *addr)
{
	unsigned long pc = rec->ip;
	long offset = (long)*addr - (long)pc;
	struct plt_entry *plt;

	/*
	 * When the target is within range of the 'BL' instruction, use 'addr'
	 * as-is and branch to that directly.
	 */
	if (offset >= -SZ_128M && offset < SZ_128M)
		return true;

	/*
	 * When the target is outside of the range of a 'BL' instruction, we
	 * must use a PLT to reach it. We can only place PLTs for modules, and
	 * only when module PLT support is built-in.
	 */
	if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		return false;

	/*
	 * 'mod' is only set at module load time, but if we end up
	 * dealing with an out-of-range condition, we can assume it
	 * is due to a module being loaded far away from the kernel.
	 *
	 * NOTE: __module_text_address() must be called with preemption
	 * disabled, but we can rely on ftrace_lock to ensure that 'mod'
	 * retains its validity throughout the remainder of this code.
	 */
	if (!mod) {
		preempt_disable();
		mod = __module_text_address(pc);
		preempt_enable();
	}

	if (WARN_ON(!mod))
		return false;

	plt = get_ftrace_plt(mod, *addr);
	if (!plt) {
		pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
		return false;
	}

	*addr = (unsigned long)plt;
	return true;
}
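
/*
 * For example (illustrative): a module loaded more than 128MiB from the
 * ftrace trampoline cannot 'BL' to it directly, so its callsites are
 * redirected to the module's own PLT entry
 * (mod->arch.ftrace_trampolines[FTRACE_PLT_IDX]), which can reach the
 * trampoline from anywhere.
 */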

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
static const struct ftrace_ops *arm64_rec_get_ops(struct dyn_ftrace *rec)
{
	const struct ftrace_ops *ops = NULL;

	if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
		ops = ftrace_find_unique_ops(rec);
		WARN_ON_ONCE(!ops);
	}

	if (!ops)
		ops = &ftrace_list_ops;

	return ops;
}
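
/*
 * The ops pointer literal occupies the two pre-function NOPs: rec->ip
 * points at the to-be-patched BL, which is 12 bytes (or 16 with a BTI)
 * past the 8-byte aligned literal, so ALIGN_DOWN(rec->ip - 12, 8) finds
 * the start of the literal in either case (see ftrace_call_adjust()).
 */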
static int ftrace_rec_set_ops(const struct dyn_ftrace *rec,
			      const struct ftrace_ops *ops)
{
	unsigned long literal = ALIGN_DOWN(rec->ip - 12, 8);

	return aarch64_insn_write_literal_u64((void *)literal,
					      (unsigned long)ops);
}

static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, &ftrace_nop_ops);
}

static int ftrace_rec_update_ops(struct dyn_ftrace *rec)
{
	return ftrace_rec_set_ops(rec, arm64_rec_get_ops(rec));
}
#else
static int ftrace_rec_set_nop_ops(struct dyn_ftrace *rec) { return 0; }
static int ftrace_rec_update_ops(struct dyn_ftrace *rec) { return 0; }
#endif

/*
 * Turn on the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old, new;
	int ret;

	ret = ftrace_rec_update_ops(rec);
	if (ret)
		return ret;

	if (!ftrace_find_callable_addr(rec, NULL, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	if (WARN_ON_ONCE(old_addr != (unsigned long)ftrace_caller))
		return -EINVAL;
	if (WARN_ON_ONCE(addr != (unsigned long)ftrace_caller))
		return -EINVAL;

	return ftrace_rec_update_ops(rec);
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * The compiler has inserted two NOPs before the regular function prologue.
 * All instrumented functions follow the AAPCS, so x0-x8 and x19-x30 are live,
 * and x9-x18 are free for our use.
 *
 * At runtime we want to be able to swing a single NOP <-> BL to enable or
 * disable the ftrace call. The BL requires us to save the original LR value,
 * so here we insert a <MOV X9, LR> over the first NOP so the instructions
 * before the regular prologue are:
 *
 * | Compiled | Disabled   | Enabled    |
 * +----------+------------+------------+
 * | NOP      | MOV X9, LR | MOV X9, LR |
 * | NOP      | NOP        | BL <entry> |
 *
 * The LR value will be recovered by ftrace_caller, and restored into LR
 * before returning to the regular function prologue. When a function is not
 * being traced, the MOV is not harmful given x9 is not live per the AAPCS.
 *
 * Note: ftrace_process_locs() has pre-adjusted rec->ip to be the address of
 * the BL.
 */
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long pc = rec->ip - AARCH64_INSN_SIZE;
	u32 old, new;
	int ret;

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	old = aarch64_insn_gen_nop();
	new = aarch64_insn_gen_move_reg(AARCH64_INSN_REG_9,
					AARCH64_INSN_REG_LR,
					AARCH64_INSN_VARIANT_64BIT);
	return ftrace_modify_code(pc, old, new, true);
}
#endif

/*
 * Turn off the call to ftrace_caller() in the instrumented function
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	unsigned long pc = rec->ip;
	u32 old = 0, new;
	int ret;

	new = aarch64_insn_gen_nop();

	ret = ftrace_rec_set_nop_ops(rec);
	if (ret)
		return ret;

	/*
	 * When using mcount, callsites in modules may have been initialized to
	 * call an arbitrary module PLT (which redirects to the _mcount stub)
	 * rather than the ftrace PLT we'll use at runtime (which redirects to
	 * the ftrace trampoline). We can ignore the old PLT when initializing
	 * the callsite.
	 *
	 * Note: 'mod' is only set at module load time.
	 */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) &&
	    IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && mod) {
		return aarch64_insn_patch_text_nosync((void *)pc, new);
	}

	if (!ftrace_find_callable_addr(rec, mod, &addr))
		return -EINVAL;

	old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

	return ftrace_modify_code(pc, old, new, true);
}

void arch_ftrace_update_code(int command)
{
	command |= FTRACE_MAY_SLEEP;
	ftrace_modify_all_code(command);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * The function_graph tracer expects ftrace_return_to_handler() to be called
 * on the way back to the parent. For this purpose, this function is called
 * in _mcount() or ftrace_caller() to replace the return address (*parent)
 * on the call stack with return_to_handler.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Note:
	 * No protection against faulting at *parent, which may be seen
	 * on other archs. It's unlikely on AArch64.
	 */
	old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer,
	    (void *)frame_pointer)) {
		*parent = return_hooker;
	}
}
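
/*
 * For example (illustrative): with DYNAMIC_FTRACE_WITH_ARGS,
 * ftrace_graph_func() below passes &fregs->lr as 'parent', so the LR
 * value saved in the ftrace_regs is rewritten to return_to_handler
 * before ftrace_caller restores it.
 */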

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	prepare_ftrace_return(ip, &fregs->lr, fregs->fp);
}
#else
/*
 * Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
 * depending on @enable.
 */
static int ftrace_modify_graph_caller(bool enable)
{
	unsigned long pc = (unsigned long)&ftrace_graph_call;
	u32 branch, nop;

	branch = aarch64_insn_gen_branch_imm(pc,
					     (unsigned long)ftrace_graph_caller,
					     AARCH64_INSN_BRANCH_NOLINK);
	nop = aarch64_insn_gen_nop();

	if (enable)
		return ftrace_modify_code(pc, nop, branch, true);
	else
		return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */