// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/pgtable.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

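/*
 * Recover the original instruction bytes at @addr when they are covered by the
 * JMP32 of an already-optimized kprobe.  Returns @addr if no optimized kprobe
 * covers it, the address of @buf holding the reconstructed bytes if one does,
 * or 0 if the probed memory cannot be read.
 */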
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
        struct optimized_kprobe *op;
        struct kprobe *kp;
        long offs;
        int i;

        for (i = 0; i < JMP32_INSN_SIZE; i++) {
                kp = get_kprobe((void *)addr - i);
                /* This function only handles jump-optimized kprobe */
                if (kp && kprobe_optimized(kp)) {
                        op = container_of(kp, struct optimized_kprobe, kp);
                        /* If op->list is not empty, op is still being optimized */
                        if (list_empty(&op->list))
                                goto found;
                }
        }

        return addr;
found:
        /*
         * If the kprobe can be optimized, the original bytes may have been
         * overwritten by the jump destination address. In this case, the
         * original bytes must be recovered from the op->optinsn.copied_insn
         * buffer.
         */
        if (probe_kernel_read(buf, (void *)addr,
                MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                return 0UL;

        if (addr == (unsigned long)kp->addr) {
                buf[0] = kp->opcode;
                memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
        } else {
                offs = addr - (unsigned long)kp->addr - 1;
                memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
        }

        return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
        /*
         * Can't be static_cpu_has() due to how objtool treats this feature bit.
         * This isn't a fast path anyway.
         */
        if (!boot_cpu_has(X86_FEATURE_SMAP))
                return;

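        /* CLAC (0f 01 ca) clears EFLAGS.AC so SMAP stays enforced while the template calls into C */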
        /* Replace the NOP3 with CLAC */
        addr[0] = 0x0f;
        addr[1] = 0x01;
        addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
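        /*
         * x86-64: 48 bf <imm64> encodes "movabs $val, %rdi" (1st argument register).
         * x86-32: b8 <imm32>    encodes "mov $val, %eax"    (1st argument register).
         */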
#ifdef CONFIG_X86_64
        *addr++ = 0x48;
        *addr++ = 0xbf;
#else
        *addr++ = 0xb8;
#endif
        *(unsigned long *)addr = val;
}

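/*
 * Template for the out-of-line detour buffer.  It saves the registers on the
 * stack, loads the optimized_kprobe pointer into the first argument register
 * (patched at optprobe_template_val), calls optimized_callback() (patched at
 * optprobe_template_call) and restores the registers.  The copied original
 * instructions plus a jump back into the probed function are appended after
 * optprobe_template_end by arch_prepare_optimized_kprobe().
 */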
asm (
                        ".pushsection .rodata\n"
                        "optprobe_template_func:\n"
                        ".global optprobe_template_entry\n"
                        "optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
                        /* We don't bother saving the ss register */
                        "       pushq %rsp\n"
                        "       pushfq\n"
                        ".global optprobe_template_clac\n"
                        "optprobe_template_clac:\n"
                        ASM_NOP3
                        SAVE_REGS_STRING
                        "       movq %rsp, %rsi\n"
                        ".global optprobe_template_val\n"
                        "optprobe_template_val:\n"
                        ASM_NOP5
                        ASM_NOP5
                        ".global optprobe_template_call\n"
                        "optprobe_template_call:\n"
                        ASM_NOP5
                        /* Move flags to rsp */
                        "       movq 18*8(%rsp), %rdx\n"
                        "       movq %rdx, 19*8(%rsp)\n"
                        RESTORE_REGS_STRING
                        /* Skip flags entry */
                        "       addq $8, %rsp\n"
                        "       popfq\n"
#else /* CONFIG_X86_32 */
                        "       pushl %esp\n"
                        "       pushfl\n"
                        ".global optprobe_template_clac\n"
                        "optprobe_template_clac:\n"
                        ASM_NOP3
                        SAVE_REGS_STRING
                        "       movl %esp, %edx\n"
                        ".global optprobe_template_val\n"
                        "optprobe_template_val:\n"
                        ASM_NOP5
                        ".global optprobe_template_call\n"
                        "optprobe_template_call:\n"
                        ASM_NOP5
                        /* Move flags into esp */
                        "       movl 14*4(%esp), %edx\n"
                        "       movl %edx, 15*4(%esp)\n"
                        RESTORE_REGS_STRING
                        /* Skip flags entry */
                        "       addl $4, %esp\n"
                        "       popfl\n"
#endif
                        ".global optprobe_template_end\n"
                        "optprobe_template_end:\n"
                        ".popsection\n");

void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

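/* Byte offsets of the patch points within the template above */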
#define TMPL_CLAC_IDX \
        ((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
        ((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
        ((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
        ((long)optprobe_template_end - (long)optprobe_template_entry)

/* Optimized kprobe callback function: called from the optinsn detour buffer */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
        /* This is possible if op is under delayed unoptimization */
        if (kprobe_disabled(&op->kp))
                return;

        preempt_disable();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
                /* Save skipped registers */
                regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
                regs->cs |= get_kernel_rpl();
                regs->gs = 0;
#endif
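                /* Emulate the INT3 trap: IP points just past the breakpoint at kp.addr */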
                regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
                regs->orig_ax = ~0UL;

                __this_cpu_write(current_kprobe, &op->kp);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }
        preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

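/*
 * Copy and relocate the instructions at @src into @dest until at least
 * JMP32_INSN_SIZE bytes are covered.  @real is the address where the copy
 * will eventually execute, so relative operands can be fixed up against it.
 * Returns the copied length, or a negative error if an instruction cannot be
 * boosted or the range is reserved by ftrace, alternatives or jump labels.
 */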
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
        struct insn insn;
        int len = 0, ret;

        while (len < JMP32_INSN_SIZE) {
                ret = __copy_instruction(dest + len, src + len, real + len, &insn);
                if (!ret || !can_boost(&insn, src + len))
                        return -EINVAL;
                len += ret;
        }
        /* Check whether the address range is reserved */
        if (ftrace_text_reserved(src, src + len - 1) ||
            alternatives_text_reserved(src, src + len - 1) ||
            jump_label_text_reserved(src, src + len - 1))
                return -EBUSY;

        return len;
}

/* Check whether insn is indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
        return ((insn->opcode.bytes[0] == 0xff &&
                (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
                insn->opcode.bytes[0] == 0xea); /* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
        unsigned long target = 0;

        switch (insn->opcode.bytes[0]) {
        case 0xe0:      /* loopne */
        case 0xe1:      /* loope */
        case 0xe2:      /* loop */
        case 0xe3:      /* jcxz */
        case 0xe9:      /* near relative jump */
        case 0xeb:      /* short relative jump */
                break;
        case 0x0f:
                if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
                        break;
                return 0;
        default:
                if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
                        break;
                return 0;
        }
        target = (unsigned long)insn->next_byte + insn->immediate.value;

        return (start <= target && target <= start + len);
}

static int insn_is_indirect_jump(struct insn *insn)
{
        int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
        /*
         * A jump to x86_indirect_thunk_* is treated as an indirect jump.
         * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
         * older gcc may still use indirect jumps, so this check is added
         * instead of replacing the indirect-jump check above.
         */
        if (!ret)
                ret = insn_jump_into_range(insn,
                                (unsigned long)__indirect_thunk_start,
                                (unsigned long)__indirect_thunk_end -
                                (unsigned long)__indirect_thunk_start);
#endif
        return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
        unsigned long addr, size = 0, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];

        /* Lookup symbol including addr */
        if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
                return 0;

        /*
         * Do not optimize in the entry code due to the unstable
         * stack handling and registers setup.
         */
        if (((paddr >= (unsigned long)__entry_text_start) &&
             (paddr <  (unsigned long)__entry_text_end)) ||
            ((paddr >= (unsigned long)__irqentry_text_start) &&
             (paddr <  (unsigned long)__irqentry_text_end)))
                return 0;

        /* Check there is enough space for a relative jump. */
        if (size - offset < JMP32_INSN_SIZE)
                return 0;

        /* Decode instructions */
        addr = paddr - offset;
        while (addr < paddr - offset + size) { /* Decode until function end */
                unsigned long recovered_insn;
                if (search_exception_tables(addr))
                        /*
                         * Since some fixup code will jump into this function,
                         * we can't optimize kprobes in this function.
                         */
                        return 0;
                recovered_insn = recover_probed_instruction(buf, addr);
                if (!recovered_insn)
                        return 0;
                kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
                insn_get_length(&insn);
                /* Another subsystem puts a breakpoint */
                if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
                        return 0;
                /* Recover address */
                insn.kaddr = (void *)addr;
                insn.next_byte = (void *)(addr + insn.length);
                /* Check that no instruction jumps into the target range */
                if (insn_is_indirect_jump(&insn) ||
                    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
                                         DISP32_SIZE))
                        return 0;
                addr += insn.length;
        }

        return 1;
}

/* Check optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        int i;
        struct kprobe *p;

        for (i = 1; i < op->optinsn.size; i++) {
                p = get_kprobe(op->kp.addr + i);
                if (p && !kprobe_disabled(p))
                        return -EEXIST;
        }

        return 0;
}

/* Check the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                 unsigned long addr)
{
        return ((unsigned long)op->kp.addr <= addr &&
                (unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
        if (op->optinsn.insn) {
                free_optinsn_slot(op->optinsn.insn, dirty);
                op->optinsn.insn = NULL;
                op->optinsn.size = 0;
        }
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        __arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions being replaced by the jump.
 * The target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
                                  struct kprobe *__unused)
{
        u8 *buf = NULL, *slot;
        int ret, len;
        long rel;

        if (!can_optimize((unsigned long)op->kp.addr))
                return -EILSEQ;

        buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        op->optinsn.insn = slot = get_optinsn_slot();
        if (!slot) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Verify that the address gap is within the 2GB range, because this
         * uses a relative jump.
         */
        rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
        if (abs(rel) > 0x7fffffff) {
                ret = -ERANGE;
                goto err;
        }

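        /*
         * Detour buffer layout:
         *   [ template: save regs / call optimized_callback() / restore regs ]
         *   [ copied original instructions ]
         *   [ JMP32 back into the probed function ]
         */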
        /* Copy arch-dep-instance from template */
        memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

        /* Copy instructions into the out-of-line buffer */
        ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
                                          slot + TMPL_END_IDX);
        if (ret < 0)
                goto err;
        op->optinsn.size = ret;
        len = TMPL_END_IDX + op->optinsn.size;

        synthesize_clac(buf + TMPL_CLAC_IDX);

        /* Set probe information */
        synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

        /* Set probe function call */
        synthesize_relcall(buf + TMPL_CALL_IDX,
                           slot + TMPL_CALL_IDX, optimized_callback);

        /* Set returning jmp instruction at the tail of out-of-line buffer */
        synthesize_reljump(buf + len, slot + len,
                           (u8 *)op->kp.addr + op->optinsn.size);
        len += JMP32_INSN_SIZE;

        /* We have to use text_poke() for instruction buffer because it is RO */
        text_poke(slot, buf, len);
        ret = 0;
out:
        kfree(buf);
        return ret;

err:
        __arch_remove_optimized_kprobe(op, 0);
        goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * The caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
        struct optimized_kprobe *op, *tmp;
        u8 insn_buff[JMP32_INSN_SIZE];

        list_for_each_entry_safe(op, tmp, oplist, list) {
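                /* The JMP32 displacement is relative to the end of the 5-byte jump */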
                s32 rel = (s32)((long)op->optinsn.insn -
                        ((long)op->kp.addr + JMP32_INSN_SIZE));

                WARN_ON(kprobe_disabled(&op->kp));

                /* Backup instructions which will be replaced by jump address */
                memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
                       DISP32_SIZE);

                insn_buff[0] = JMP32_INSN_OPCODE;
                *(s32 *)(&insn_buff[1]) = rel;

                text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

                list_del_init(&op->list);
        }
}

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        arch_arm_kprobe(&op->kp);
        text_poke(op->kp.addr + INT3_INSN_SIZE,
                  op->optinsn.copied_insn, DISP32_SIZE);
        text_poke_sync();
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * The caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
                                    struct list_head *done_list)
{
        struct optimized_kprobe *op, *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
        struct optimized_kprobe *op;

        if (p->flags & KPROBE_FLAG_OPTIMIZED) {
                /* This kprobe is really able to run optimized path. */
                op = container_of(p, struct optimized_kprobe, kp);
                /* Detour through copied instructions */
                regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
                if (!reenter)
                        reset_current_kprobe();
                return 1;
        }
        return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);