Merge branch 'topic/kprobes' into next
author Michael Ellerman <mpe@ellerman.id.au>
Mon, 24 Apr 2017 14:24:04 +0000 (00:24 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Mon, 24 Apr 2017 14:24:04 +0000 (00:24 +1000)
Although most of these kprobes patches are powerpc specific, there are a
couple that touch generic code (with Acks). At the moment there's one
conflict with acme's tree, but it's not too bad. Still, just in case other
conflicts show up, we've put these in a topic branch so that another tree
can merge some or all of it if necessary.

1  2
arch/powerpc/Kconfig            Simple merge
arch/powerpc/kernel/entry_64.S  Simple merge
arch/powerpc/kernel/kprobes.c
@@@ -43,15 -42,78 +43,86 @@@ DEFINE_PER_CPU(struct kprobe_ctlblk, kp
  
  struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
  
 -int __kprobes arch_prepare_kprobe(struct kprobe *p)
 +bool arch_within_kprobe_blacklist(unsigned long addr)
 +{
 +      return  (addr >= (unsigned long)__kprobes_text_start &&
 +               addr < (unsigned long)__kprobes_text_end) ||
 +              (addr >= (unsigned long)_stext &&
 +               addr < (unsigned long)__head_end);
 +}
 +
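
For context, the generic kprobes core gates probe registration on this arch
hook. A simplified sketch of that generic-side check (condensed from memory
of kernel/kprobes.c, not part of this diff):

static bool within_kprobe_blacklist(unsigned long addr)
{
        struct kprobe_blacklist_entry *ent;

        /* Arch veto first: powerpc now also rejects the real-mode
         * text between _stext and __head_end. */
        if (arch_within_kprobe_blacklist(addr))
                return true;

        /* Then the generic NOKPROBE_SYMBOL() blacklist. */
        list_for_each_entry(ent, &kprobe_blacklist, list) {
                if (addr >= ent->start_addr && addr < ent->end_addr)
                        return true;
        }
        return false;
}
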
+ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
+ {
+       kprobe_opcode_t *addr;
+ #ifdef PPC64_ELF_ABI_v2
+       /* PPC64 ABIv2 needs local entry point */
+       addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+       if (addr && !offset) {
+ #ifdef CONFIG_KPROBES_ON_FTRACE
+               unsigned long faddr;
+               /*
+                * Per livepatch.h, ftrace location is always within the first
+                * 16 bytes of a function on powerpc with -mprofile-kernel.
+                */
+               faddr = ftrace_location_range((unsigned long)addr,
+                                             (unsigned long)addr + 16);
+               if (faddr)
+                       addr = (kprobe_opcode_t *)faddr;
+               else
+ #endif
+                       addr = (kprobe_opcode_t *)ppc_function_entry(addr);
+       }
+ #elif defined(PPC64_ELF_ABI_v1)
+       /*
+        * 64bit powerpc ABIv1 uses function descriptors:
+        * - Check for the dot variant of the symbol first.
+        * - If that fails, try looking up the symbol provided.
+        *
+        * This ensures we always get to the actual symbol and not
+        * the descriptor.
+        *
+        * Also handle <module:symbol> format.
+        */
+       char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
+       const char *modsym;
+       bool dot_appended = false;
+       if ((modsym = strchr(name, ':')) != NULL) {
+               modsym++;
+               if (*modsym != '\0' && *modsym != '.') {
+                       /* Convert to <module:.symbol> */
+                       strncpy(dot_name, name, modsym - name);
+                       dot_name[modsym - name] = '.';
+                       dot_name[modsym - name + 1] = '\0';
+                       strncat(dot_name, modsym,
+                               sizeof(dot_name) - (modsym - name) - 2);
+                       dot_appended = true;
+               } else {
+                       dot_name[0] = '\0';
+                       strncat(dot_name, name, sizeof(dot_name) - 1);
+               }
+       } else if (name[0] != '.') {
+               dot_name[0] = '.';
+               dot_name[1] = '\0';
+               strncat(dot_name, name, KSYM_NAME_LEN - 2);
+               dot_appended = true;
+       } else {
+               dot_name[0] = '\0';
+               strncat(dot_name, name, KSYM_NAME_LEN - 1);
+       }
+       addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
+       if (!addr && dot_appended) {
+               /* Let's try the original non-dot symbol lookup */
+               addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+       }
+ #else
+       addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
+ #endif
+       return addr;
+ }
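
A usage sketch of what this buys us: since the core resolves kp.addr via
kprobe_lookup_name() at registration, probing by symbol name on ABIv2 lands
on the local entry point automatically. The target symbol below is
hypothetical:

#include <linux/kprobes.h>

static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("hit %s at nip=0x%lx\n", p->symbol_name, regs->nip);
        return 0;
}

static struct kprobe kp = {
        .symbol_name    = "_do_fork",   /* hypothetical target */
        .pre_handler    = pre_handler,
};

/* register_kprobe(&kp) resolves kp.addr via kprobe_lookup_name(),
 * so the probe sits past the global-entry TOC setup on ABIv2. */
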
 +int arch_prepare_kprobe(struct kprobe *p)
  {
        int ret = 0;
        kprobe_opcode_t insn = *p->addr;
@@@ -144,16 -202,58 +215,59 @@@ static nokprobe_inline void set_current
        kcb->kprobe_saved_msr = regs->msr;
  }
  
 -void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 -                                    struct pt_regs *regs)
+ bool arch_function_offset_within_entry(unsigned long offset)
+ {
+ #ifdef PPC64_ELF_ABI_v2
+ #ifdef CONFIG_KPROBES_ON_FTRACE
+       return offset <= 16;
+ #else
+       return offset <= 8;
+ #endif
+ #else
+       return !offset;
+ #endif
+ }
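
On the generic side this lets the kretprobe registration path reject offsets
that fall past the function entry. A sketch of that gate (the helper name is
hypothetical; the check mirrors what the core does):

/* Hypothetical helper sketching the generic-side gate. */
static int kretprobe_offset_ok(struct kretprobe *rp)
{
        /* Offset 0 is always the entry; otherwise defer to the arch,
         * which allows up to the ftrace location on ABIv2. */
        if (rp->kp.offset &&
            !arch_function_offset_within_entry(rp->kp.offset))
                return -EINVAL;
        return 0;
}
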
 +void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
  {
        ri->ret_addr = (kprobe_opcode_t *)regs->link;
  
        /* Replace the return addr with trampoline addr */
        regs->link = (unsigned long)kretprobe_trampoline;
  }
 +NOKPROBE_SYMBOL(arch_prepare_kretprobe);
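
A hedged sketch of the other end of this mechanism: a kretprobe whose
handler runs when the trampoline fires (target symbol hypothetical; r3
carries the return value on powerpc):

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        /* gpr[3] is the powerpc return-value register */
        pr_info("returned 0x%lx\n", regs->gpr[3]);
        return 0;
}

static struct kretprobe rp = {
        .kp.symbol_name = "_do_fork",   /* hypothetical target */
        .handler        = ret_handler,
};

/* After register_kretprobe(&rp), each entry to the target has its
 * LR swapped for kretprobe_trampoline by arch_prepare_kretprobe(). */
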
  
 -int __kprobes try_to_emulate(struct kprobe *p, struct pt_regs *regs)
++int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
+ {
+       int ret;
+       unsigned int insn = *p->ainsn.insn;
+       /* regs->nip is also adjusted if emulate_step returns 1 */
+       ret = emulate_step(regs, insn);
+       if (ret > 0) {
+               /*
+                * Once this instruction has been boosted
+                * successfully, set the boostable flag
+                */
+               if (unlikely(p->ainsn.boostable == 0))
+                       p->ainsn.boostable = 1;
+       } else if (ret < 0) {
+               /*
+                * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
+                * So we should never get here... but it's still
+                * good to catch them, just in case...
+                */
+               printk("Can't step on instruction %x\n", insn);
+               BUG();
+       } else if (ret == 0)
+               /* This instruction can't be boosted */
+               p->ainsn.boostable = -1;
+       return ret;
+ }
++NOKPROBE_SYMBOL(try_to_emulate);
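
The payoff is in the caller: when emulation succeeds the probe is "boosted"
and the single-step exception is skipped entirely. A simplified sketch of
that fast path (hypothetical helper, condensed from kprobe_handler()):

/* Hypothetical helper condensing the boost fast path. */
static int try_boost(struct kprobe *p, struct pt_regs *regs,
                     struct kprobe_ctlblk *kcb)
{
        if (p->ainsn.boostable >= 0 && try_to_emulate(p, regs) > 0) {
                /* emulate_step() already advanced regs->nip, so no
                 * single-step trap is needed for this hit. */
                if (p->post_handler)
                        p->post_handler(p, regs, 0);
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                reset_current_kprobe();
                return 1;       /* handled */
        }
        return 0;               /* fall back to hardware single-step */
}
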
 -int __kprobes kprobe_handler(struct pt_regs *regs)
 +int kprobe_handler(struct pt_regs *regs)
  {
        struct kprobe *p;
        int ret = 0;