/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note: Due to modules and __init, code can
	 *  disappear and change, we need to protect against faulting
	 *  as well as code changing. We do this by using the
	 *  probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old)
		return -EINVAL;

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	/* use the create_branch to verify that this offset can be branched */
	return create_branch((unsigned int *)ip, addr, 0);
}
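
/*
 * Background note: a relative branch on powerpc encodes a 24-bit word
 * offset, i.e. a signed 26-bit byte displacement of roughly +/- 32MB from
 * the call site.  create_branch() returns 0 when the target cannot be
 * reached that way, so a zero result above means the call has to go
 * through a trampoline instead.
 */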

#ifdef CONFIG_MODULES

static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}
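
/*
 * Worked example (illustrative, not from the original source): the word
 * 0x48000009 is "bl" to ip + 8.  The top six bits (0x48000000) are the
 * branch major opcode (18), AA = 0 and LK = 1, so the 0xfc000003 mask in
 * is_bl_op() matches 0x48000001.  find_bl_target() masks out the LI field
 * (0x00000008 here), sign-extends it when bit 0x02000000 is set, and adds
 * it to ip.
 */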

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[5];
	unsigned long ptr;
	unsigned long ip = rec->ip;
	unsigned long tramp;
	int offset;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC64 the trampoline looks like:
	 * 0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
	 * 0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
	 *   Where the bytes 2,3,6 and 7 make up the 32bit offset
	 *   to the TOC that holds the pointer
	 *   to jump to.
	 * 0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
	 * 0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
	 *   The actual address is 32 bytes from the offset
	 *   into the TOC.
	 * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
	 */
140 pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
142 /* Find where the trampoline jumps to */
143 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
144 printk(KERN_ERR "Failed to read %lx\n", tramp);
148 pr_devel(" %08x %08x", jmp[0], jmp[1]);
150 /* verify that this is what we expect it to be */
151 if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
152 ((jmp[1] & 0xffff0000) != 0x398c0000) ||
153 (jmp[2] != 0xf8410028) ||
154 (jmp[3] != 0xe96c0020) ||
155 (jmp[4] != 0xe84c0028)) {
156 printk(KERN_ERR "Not a trampoline\n");
160 /* The bottom half is signed extended */
161 offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
162 (int)((short)jmp[1]);
164 pr_devel(" %x ", offset);
166 /* get the address this jumps too */
167 tramp = mod->arch.toc + offset + 32;
168 pr_devel("toc: %lx", tramp);
170 if (probe_kernel_read(jmp, (void *)tramp, 8)) {
171 printk(KERN_ERR "Failed to read %lx\n", tramp);
175 pr_devel(" %08x %08x\n", jmp[0], jmp[1]);

#ifdef __LITTLE_ENDIAN__
	ptr = ((unsigned long)jmp[1] << 32) + jmp[0];
#else
	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
#endif

	/* This should match what was called */
	if (ptr != ppc_function_entry((void *)addr)) {
		printk(KERN_ERR "addr does not match %lx\n", ptr);
		return -EINVAL;
	}

	/*
	 * We want to nop the line, but the next line is
	 *  0xe8, 0x41, 0x00, 0x28	ld r2,40(r1)
	 * This needs to be turned to a nop too.
	 */
	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (op != 0xe8410028) {
		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
		return -EINVAL;
	}

	/*
	 * Milton Miller pointed out that we can not blindly do nops.
	 * If a task was preempted when calling a trace function,
	 * the nops will remove the way to restore the TOC in r2
	 * and the r2 TOC will get corrupted.
	 */

	/*
	 * Replace:
	 *   bl <tramp>  <==== will be replaced with "b 1f"
	 *   ld r2,40(r1)
	 *  1:
	 */
	op = 0x48000008;	/* b +8 */
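
	/*
	 * That is, after patching the call site reads:
	 *   b +8          (was: bl <tramp>)
	 *   ld r2,40(r1)
	 * New calls branch over both instructions, while a task that was
	 * preempted inside the trampoline still returns to ip + 4 and finds
	 * the ld there to restore its TOC from the stack.
	 */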

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24-bit jump */
	if (!is_bl_op(op)) {
		printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		printk(KERN_ERR "Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		printk(KERN_ERR "Not a trampoline\n");
		return -EINVAL;
	}

	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		printk(KERN_ERR
		       "Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			printk(KERN_ERR "No module loaded addr=%lx\n",
			       addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			printk(KERN_ERR
			       "Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
		return -EFAULT;

	/*
	 * It should be pointing to two nops or
	 *   b +8; ld r2,40(r1)
	 */
	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op[0] = create_branch((unsigned int *)ip,
			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op[0]) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}
384 pr_devel("write to %lx\n", rec->ip);
386 if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
389 flush_icache_range(ip, ip + 8);

#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		printk(KERN_ERR "Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		printk(KERN_ERR "No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		printk(KERN_ERR "REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Since we are converting from a nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		printk(KERN_ERR "No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
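
/*
 * ftrace_update_ftrace_func() below rewrites the call at ftrace_call, a
 * global label inside the ftrace_caller assembly stub; patching that one
 * instruction switches which C tracer function every traced kernel
 * function ends up calling.
 */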

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec->ip);
			return;
		}
	}
}

void arch_ftrace_update_code(int command)
{
	if (command & FTRACE_UPDATE_CALLS)
		ftrace_replace_code(1);
	else if (command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* caller expects data to be zero */
	unsigned long *p = data;

	*p = 0;

	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
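
/*
 * ftrace_graph_call is the branch site in the ftrace_caller assembly that
 * hands control to the function graph tracer.  Enabling the graph caller
 * patches that branch to point at ftrace_graph_caller; disabling it points
 * the branch back at the ftrace_graph_stub label, which turns it into a
 * fall-through.
 */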

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_PPC64
extern void mod_return_to_handler(void);
#endif

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

#ifdef CONFIG_PPC64
	/* non core kernel code needs to save and restore the TOC */
	if (REGION_ID(self_addr) != KERNEL_REGION_ID)
		return_hooker = (unsigned long)&mod_return_to_handler;
#endif

	return_hooker = ppc_function_entry((void *)return_hooker);

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tracer is too intrusive to
	 * ignore such a protection.
	 */
610 "1: " PPC_LL "%[old], 0(%[parent])\n"
611 "2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
612 " li %[faulted], 0\n"
615 ".section .fixup, \"ax\"\n"
616 "4: li %[faulted], 1\n"
620 ".section __ex_table,\"a\"\n"
626 : [old] "=&r" (old), [faulted] "=r" (faulted)
627 : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
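
	/*
	 * If either the load at 1: or the store at 2: faults, the
	 * __ex_table entry sends execution to 4: in the .fixup section,
	 * which sets faulted and branches back to 3:, letting the C code
	 * below bail out instead of oopsing.
	 */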

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	trace.func = self_addr;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		*parent = old;
		return;
	}

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
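
/*
 * Note on the nr*2 above: on PPC64 each syscall number occupies two
 * consecutive slots in sys_call_table (its 64-bit entry and its 32-bit
 * compat entry), which is why the native handler is found at index
 * nr * 2 rather than nr.
 */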