// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"
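
/*
 * Overview: jump optimization replaces the single-byte INT3 of an armed
 * kprobe with a 5-byte JMP.d32 into a per-probe detour buffer.  The buffer
 * is built from optprobe_template_entry below and laid out as:
 *
 *	[ template: save regs, set arg1 = op, call optimized_callback() ]
 *	[ copy of the original instructions (op->optinsn.size bytes)    ]
 *	[ JMP.d32 back to op->kp.addr + op->optinsn.size                ]
 *
 * The four original bytes overwritten by the JMP displacement are kept in
 * op->optinsn.copied_insn so they can be recovered (__recover_optprobed_insn)
 * and restored (arch_unoptimize_kprobe) later.
 */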
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op is optimized or under unoptimizing */
			if (list_empty(&op->list) || optprobe_queued_unopt(op))
				goto found;
		}
	}
	return addr;
found:
	/*
	 * If the kprobe has been optimized, the original bytes were
	 * overwritten by the jump destination address and must be
	 * recovered from the op->optinsn.copied_insn buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		/* Restore the original first opcode byte. */
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}
	return (unsigned long)buf;
}
static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;
	/* Replace the NOP3 with CLAC (0f 01 ca) */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;		/* movabs $val, %rdi */
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;		/* movl $val, %eax */
#endif
	*(unsigned long *)addr = val;
}
105 ".pushsection .rodata\n"
106 "optprobe_template_func:\n"
107 ".global optprobe_template_entry\n"
108 "optprobe_template_entry:\n"
110 " pushq $" __stringify(__KERNEL_DS) "\n"
111 /* Save the 'sp - 8', this will be fixed later. */
114 ".global optprobe_template_clac\n"
115 "optprobe_template_clac:\n"
119 ".global optprobe_template_val\n"
120 "optprobe_template_val:\n"
123 ".global optprobe_template_call\n"
124 "optprobe_template_call:\n"
126 /* Copy 'regs->flags' into 'regs->ss'. */
127 " movq 18*8(%rsp), %rdx\n"
128 " movq %rdx, 20*8(%rsp)\n"
130 /* Skip 'regs->flags' and 'regs->sp'. */
132 /* And pop flags register from 'regs->ss'. */
134 #else /* CONFIG_X86_32 */
136 /* Save the 'sp - 4', this will be fixed later. */
139 ".global optprobe_template_clac\n"
140 "optprobe_template_clac:\n"
144 ".global optprobe_template_val\n"
145 "optprobe_template_val:\n"
147 ".global optprobe_template_call\n"
148 "optprobe_template_call:\n"
150 /* Copy 'regs->flags' into 'regs->ss'. */
151 " movl 14*4(%esp), %edx\n"
152 " movl %edx, 16*4(%esp)\n"
154 /* Skip 'regs->flags' and 'regs->sp'. */
156 /* And pop flags register from 'regs->ss'. */
159 ".global optprobe_template_end\n"
160 "optprobe_template_end:\n"
void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)
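
/*
 * The TMPL_*_IDX values are byte offsets into the copied template at which
 * arch_prepare_optimized_kprobe() patches in the CLAC, the "mov $op, %arg1"
 * immediate and the call to optimized_callback(); TMPL_END_IDX is where the
 * copied original instructions begin.
 */
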
/* Optimized kprobe call back function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Adjust stack pointer */
		regs->sp += sizeof(long);
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}
/* Check whether insn is an indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	/* Relative jump/loop target = address of next insn + immediate */
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to an x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
	 * older gcc may still emit plain indirect jumps, so this check is
	 * added on top of the indirect-jump check rather than replacing it.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}
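
/*
 * A probe site is eligible for jump optimization only when the surrounding
 * function is known to kallsyms, is not entry text, still has at least
 * JMP32_INSN_SIZE bytes after the probe point, contains no instructions with
 * exception-table fixups or kgdb breakpoints, and none of its instructions
 * jump into the bytes that the JMP.d32 would overwrite.  can_optimize()
 * checks this by decoding the whole function.
 */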
/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr < (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		int ret;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;

		ret = insn_decode_kernel(&insn, (void *)recovered_insn);
		if (ret < 0)
			return 0;

		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}
	return 1;
}
/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disarmed(p))
			return -EEXIST;
	}

	return 0;
}

/* Check the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + op->optinsn.size > addr);
}
/* Free the optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;
	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
/*
 * Copy replacing target instructions.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify if the address gap is in 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note that len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is
	 * also used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}
/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}
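
/*
 * At this point the probed address holds a 5-byte JMP.d32 (0xe9 + rel32).
 * The four bytes clobbered by the displacement are preserved in
 * op->optinsn.copied_insn (used by arch_unoptimize_kprobe() to restore them)
 * and the original first opcode byte in op->kp.opcode (used by
 * __recover_optprobed_insn()).
 */
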
/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}
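
/*
 * The text_poke_sync() between the two pokes above makes the INT3 globally
 * visible before the displacement bytes are restored, so no CPU can observe
 * a half-restored instruction; this mirrors the INT3-based patching scheme
 * that text_poke_bp() uses in the optimizing direction.
 */
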
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);