arch/x86/kernel/kprobes/opt.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

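/*
 * An optimized kprobe overwrites the first JMP32_INSN_SIZE (5) bytes at the
 * probed address: the original first byte is kept in kp->opcode and the
 * following DISP32_SIZE bytes in op->optinsn.copied_insn.  If 'addr' falls
 * inside that window, rebuild the original bytes from those copies instead
 * of trusting what is currently mapped at 'addr'.
 */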
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
        struct optimized_kprobe *op;
        struct kprobe *kp;
        long offs;
        int i;

        for (i = 0; i < JMP32_INSN_SIZE; i++) {
                kp = get_kprobe((void *)addr - i);
                /* This function only handles jump-optimized kprobes. */
                if (kp && kprobe_optimized(kp)) {
                        op = container_of(kp, struct optimized_kprobe, kp);
                        /* If op is optimized or queued for unoptimizing */
                        if (list_empty(&op->list) || optprobe_queued_unopt(op))
                                goto found;
                }
        }

        return addr;
found:
        /*
         * If the kprobe is optimized, the original bytes may have been
         * overwritten by the jump destination address. In that case, the
         * original bytes must be recovered from the op->optinsn.copied_insn
         * buffer.
         */
        if (copy_from_kernel_nofault(buf, (void *)addr,
                MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                return 0UL;

        if (addr == (unsigned long)kp->addr) {
                buf[0] = kp->opcode;
                memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
        } else {
                offs = addr - (unsigned long)kp->addr - 1;
                memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
        }

        return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
        /*
         * Can't be static_cpu_has() due to how objtool treats this feature bit.
         * This isn't a fast path anyway.
         */
        if (!boot_cpu_has(X86_FEATURE_SMAP))
                return;

        /* Replace the NOP3 with CLAC */
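        /*
         * CLAC encodes as the three bytes 0f 01 ca; the template reserves a
         * 3-byte NOP (ASM_NOP3) at optprobe_template_clac as its placeholder.
         */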
        addr[0] = 0x0f;
        addr[1] = 0x01;
        addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
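/*
 * On x86-64 this emits 48 bf <imm64> (movabs $val, %rdi); on 32-bit it emits
 * b8 <imm32> (mov $val, %eax).  Either way, the immediate is written as a
 * raw unsigned long directly after the opcode bytes.
 */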
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
        *addr++ = 0x48;
        *addr++ = 0xbf;
#else
        *addr++ = 0xb8;
#endif
        *(unsigned long *)addr = val;
}

asm (
                        ".pushsection .rodata\n"
                        "optprobe_template_func:\n"
                        ".global optprobe_template_entry\n"
                        "optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
                        "       pushq $" __stringify(__KERNEL_DS) "\n"
                        /* Save the 'sp - 8'; this will be fixed up later. */
                        "       pushq %rsp\n"
                        "       pushfq\n"
                        ".global optprobe_template_clac\n"
                        "optprobe_template_clac:\n"
                        ASM_NOP3
                        SAVE_REGS_STRING
                        "       movq %rsp, %rsi\n"
                        ".global optprobe_template_val\n"
                        "optprobe_template_val:\n"
                        ASM_NOP5
                        ASM_NOP5
                        ".global optprobe_template_call\n"
                        "optprobe_template_call:\n"
                        ASM_NOP5
                        /* Copy 'regs->flags' into 'regs->ss'. */
                        "       movq 18*8(%rsp), %rdx\n"
                        "       movq %rdx, 20*8(%rsp)\n"
                        RESTORE_REGS_STRING
                        /* Skip 'regs->flags' and 'regs->sp'. */
                        "       addq $16, %rsp\n"
                        /* And pop flags register from 'regs->ss'. */
                        "       popfq\n"
#else /* CONFIG_X86_32 */
                        "       pushl %ss\n"
                        /* Save the 'sp - 4'; this will be fixed up later. */
                        "       pushl %esp\n"
                        "       pushfl\n"
                        ".global optprobe_template_clac\n"
                        "optprobe_template_clac:\n"
                        ASM_NOP3
                        SAVE_REGS_STRING
                        "       movl %esp, %edx\n"
                        ".global optprobe_template_val\n"
                        "optprobe_template_val:\n"
                        ASM_NOP5
                        ".global optprobe_template_call\n"
                        "optprobe_template_call:\n"
                        ASM_NOP5
                        /* Copy 'regs->flags' into 'regs->ss'. */
                        "       movl 14*4(%esp), %edx\n"
                        "       movl %edx, 16*4(%esp)\n"
                        RESTORE_REGS_STRING
                        /* Skip 'regs->flags' and 'regs->sp'. */
                        "       addl $8, %esp\n"
                        /* And pop flags register from 'regs->ss'. */
                        "       popfl\n"
#endif
                        ".global optprobe_template_end\n"
                        "optprobe_template_end:\n"
                        ".popsection\n");
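
/*
 * The template above builds a struct pt_regs on the stack: the fake
 * ss/sp/flags pushes plus SAVE_REGS_STRING fill in the register frame, and a
 * pointer to it is passed as the second argument (%rsi, or %edx on 32-bit).
 * The NOP placeholders at optprobe_template_val and optprobe_template_call
 * are patched at prepare time with "mov $op, %rdi/%eax" and a call to
 * optimized_callback().  On x86-64, 18*8(%rsp) and 20*8(%rsp) are the
 * 'flags' and 'ss' slots of pt_regs, so copying flags into the 'ss' slot
 * lets the final popfq restore them after RESTORE_REGS_STRING.
 */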

void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
        ((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
        ((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
        ((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
        ((long)optprobe_template_end - (long)optprobe_template_entry)
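
/*
 * Byte offsets, from optprobe_template_entry, of the CLAC placeholder, the
 * "mov $op" instruction, the callback call site, and the template end.
 */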

/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
        /* This can happen if op is queued for delayed unoptimization */
        if (kprobe_disabled(&op->kp))
                return;

        preempt_disable();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
                /* Adjust stack pointer */
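                /*
                 * The template pushed the stack pointer value *after* its
                 * first push (original sp minus one word), so add one word
                 * back to report the probed context's real sp.
                 */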
                regs->sp += sizeof(long);
                /* Save skipped registers */
                regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
                regs->gs = 0;
#endif
                regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
                regs->orig_ax = ~0UL;

                __this_cpu_write(current_kprobe, &op->kp);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }
        preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

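/*
 * Relocate whole instructions from the probed address until at least
 * JMP32_INSN_SIZE (5) bytes are covered, since that is how much the
 * optimized jump overwrites.  Every copied instruction must be boostable
 * (executable out of line), and the range must not be claimed by ftrace,
 * alternatives, jump labels or static calls.
 */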
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
        struct insn insn;
        int len = 0, ret;

        while (len < JMP32_INSN_SIZE) {
                ret = __copy_instruction(dest + len, src + len, real + len, &insn);
                if (!ret || !can_boost(&insn, src + len))
                        return -EINVAL;
                len += ret;
        }
        /* Check whether the address range is reserved */
        if (ftrace_text_reserved(src, src + len - 1) ||
            alternatives_text_reserved(src, src + len - 1) ||
            jump_label_text_reserved(src, src + len - 1) ||
            static_call_text_reserved(src, src + len - 1))
                return -EBUSY;

        return len;
}

/* Check whether insn is an indirect jump */
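/*
 * Opcode 0xff with a ModRM reg field of 4 (JMP r/m, near) or 5 (JMP m16:xx,
 * far) is an indirect jump; "(reg & 6) == 4" matches both values.  0xea is
 * the legacy direct far jump with an explicit segment:offset.
 */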
static int __insn_is_indirect_jump(struct insn *insn)
{
        return ((insn->opcode.bytes[0] == 0xff &&
                (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
                insn->opcode.bytes[0] == 0xea); /* Segment based jump */
}

/* Check whether insn jumps into the specified address range */
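/*
 * The target of a relative branch is the address of the next instruction
 * plus the sign-extended immediate; e.g. an "e9 rel32" jump at address A
 * with displacement d lands at A + 5 + d.
 */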
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
        unsigned long target = 0;

        switch (insn->opcode.bytes[0]) {
        case 0xe0:      /* loopne */
        case 0xe1:      /* loope */
        case 0xe2:      /* loop */
        case 0xe3:      /* jcxz */
        case 0xe9:      /* near relative jump */
        case 0xeb:      /* short relative jump */
                break;
        case 0x0f:
                if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
                        break;
                return 0;
        default:
                if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
                        break;
                return 0;
        }
        target = (unsigned long)insn->next_byte + insn->immediate.value;

        return (start <= target && target <= start + len);
}

static int insn_is_indirect_jump(struct insn *insn)
{
        int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
        /*
         * A jump to an x86_indirect_thunk_* is treated as an indirect jump.
         * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
         * older gcc may still emit plain indirect jumps, so this check is
         * added on top of the indirect-jump check above rather than
         * replacing it.
         */
        if (!ret)
                ret = insn_jump_into_range(insn,
                                (unsigned long)__indirect_thunk_start,
                                (unsigned long)__indirect_thunk_end -
                                (unsigned long)__indirect_thunk_start);
#endif
        return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
        unsigned long addr, size = 0, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];

        /* Lookup symbol including addr */
        if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
                return 0;

        /*
         * Do not optimize in the entry code due to the unstable
         * stack handling and register setup.
         */
        if (((paddr >= (unsigned long)__entry_text_start) &&
             (paddr <  (unsigned long)__entry_text_end)))
                return 0;

        /* Check there is enough space for a relative jump. */
        if (size - offset < JMP32_INSN_SIZE)
                return 0;

        /* Decode instructions */
        addr = paddr - offset;
        while (addr < paddr - offset + size) { /* Decode until function end */
                unsigned long recovered_insn;
                int ret;

                if (search_exception_tables(addr))
                        /*
                         * Since some fixup code jumps into this function,
                         * we can't optimize kprobes in this function.
                         */
                        return 0;
                recovered_insn = recover_probed_instruction(buf, addr);
                if (!recovered_insn)
                        return 0;

                ret = insn_decode_kernel(&insn, (void *)recovered_insn);
                if (ret < 0)
                        return 0;
#ifdef CONFIG_KGDB
                /*
                 * If there is a dynamically installed kgdb sw breakpoint,
                 * this function should not be probed.
                 */
                if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
                    kgdb_has_hit_break(addr))
                        return 0;
#endif
                /* Recover address */
                insn.kaddr = (void *)addr;
                insn.next_byte = (void *)(addr + insn.length);
                /* Check that no instruction jumps into the target */
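                /*
                 * The "target" is the DISP32_SIZE bytes right after the
                 * probe's INT3: they are overwritten by the jump
                 * displacement, so nothing may branch into them.
                 */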
                if (insn_is_indirect_jump(&insn) ||
                    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
                                         DISP32_SIZE))
                        return 0;
                addr += insn.length;
        }

        return 1;
}

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        int i;
        struct kprobe *p;

        for (i = 1; i < op->optinsn.size; i++) {
                p = get_kprobe(op->kp.addr + i);
                if (p && !kprobe_disarmed(p))
                        return -EEXIST;
        }

        return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                 kprobe_opcode_t *addr)
{
        return (op->kp.addr <= addr &&
                op->kp.addr + op->optinsn.size > addr);
}

/* Free the optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
        u8 *slot = op->optinsn.insn;
        if (slot) {
                int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

                /* Record the perf event before freeing the slot */
                if (dirty)
                        perf_event_text_poke(slot, slot, len, NULL, 0);

                free_optinsn_slot(slot, dirty);
                op->optinsn.insn = NULL;
                op->optinsn.size = 0;
        }
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        __arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions that will be replaced and prepare the out-of-line
 * buffer.
 * The target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
                                  struct kprobe *__unused)
{
        u8 *buf = NULL, *slot;
        int ret, len;
        long rel;

        if (!can_optimize((unsigned long)op->kp.addr))
                return -EILSEQ;

        buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        op->optinsn.insn = slot = get_optinsn_slot();
        if (!slot) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Verify that the address gap is within the 2GB range, because this
         * uses a relative jump.
         */
        rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
        if (abs(rel) > 0x7fffffff) {
                ret = -ERANGE;
                goto err;
        }

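        /*
         * The out-of-line buffer is laid out as:
         *   [template (TMPL_END_IDX)][relocated original insns][jmp back]
         * It is assembled in 'buf' first and only then written into the RO
         * slot with text_poke() below.
         */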
        /* Copy arch-dep-instance from template */
        memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

        /* Copy instructions into the out-of-line buffer */
        ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
                                          slot + TMPL_END_IDX);
        if (ret < 0)
                goto err;
        op->optinsn.size = ret;
        len = TMPL_END_IDX + op->optinsn.size;

        synthesize_clac(buf + TMPL_CLAC_IDX);

        /* Set probe information */
        synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

        /* Set probe function call */
        synthesize_relcall(buf + TMPL_CALL_IDX,
                           slot + TMPL_CALL_IDX, optimized_callback);

        /* Set the returning jmp instruction at the tail of the out-of-line buffer */
        synthesize_reljump(buf + len, slot + len,
                           (u8 *)op->kp.addr + op->optinsn.size);
        len += JMP32_INSN_SIZE;

        /*
         * Note len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
         * used in __arch_remove_optimized_kprobe().
         */

        /* We have to use text_poke() for the instruction buffer because it is RO */
        perf_event_text_poke(slot, NULL, 0, buf, len);
        text_poke(slot, buf, len);

        ret = 0;
out:
        kfree(buf);
        return ret;

err:
        __arch_remove_optimized_kprobe(op, 0);
        goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
        struct optimized_kprobe *op, *tmp;
        u8 insn_buff[JMP32_INSN_SIZE];

        list_for_each_entry_safe(op, tmp, oplist, list) {
                s32 rel = (s32)((long)op->optinsn.insn -
                        ((long)op->kp.addr + JMP32_INSN_SIZE));

                WARN_ON(kprobe_disabled(&op->kp));

                /* Back up the instructions that will be replaced by the jump address */
                memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
                       DISP32_SIZE);

                insn_buff[0] = JMP32_INSN_OPCODE;
                *(s32 *)(&insn_buff[1]) = rel;

                text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

                list_del_init(&op->list);
        }
}

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
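/*
 * Note the ordering of the two text_poke()/text_poke_sync() pairs below:
 * the INT3 byte is installed and made globally visible first, so no CPU can
 * execute a half-restored instruction while the four displacement bytes are
 * being rewritten.
 */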
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
        u8 old[JMP32_INSN_SIZE];
        u8 *addr = op->kp.addr;

        memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
        memcpy(new + INT3_INSN_SIZE,
               op->optinsn.copied_insn,
               JMP32_INSN_SIZE - INT3_INSN_SIZE);

        text_poke(addr, new, INT3_INSN_SIZE);
        text_poke_sync();
        text_poke(addr + INT3_INSN_SIZE,
                  new + INT3_INSN_SIZE,
                  JMP32_INSN_SIZE - INT3_INSN_SIZE);
        text_poke_sync();

        perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
                                    struct list_head *done_list)
{
        struct optimized_kprobe *op, *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}

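/*
 * Called from the kprobe INT3 handler: if this probe is optimized, resume
 * execution in the out-of-line buffer just past the template, i.e. at the
 * relocated copy of the original instructions.
 */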
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
        struct optimized_kprobe *op;

        if (p->flags & KPROBE_FLAG_OPTIMIZED) {
                /* This kprobe can really run the optimized path. */
                op = container_of(p, struct optimized_kprobe, kp);
                /* Detour through copied instructions */
                regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
                if (!reenter)
                        reset_current_kprobe();
                return 1;
        }
        return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);