/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * Dynamic Binary Instrumentation Module based on KProbes
 * modules/kprobe/dbi_kprobes.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2006-2010
 *
 * 2006-2007 Ekaterina Gorelkina <e.gorelkina@samsung.com>: initial implementation for ARM and MIPS
 * 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com>: User-Space
 *           Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
 * 2010      Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
 */

#include "dbi_kprobes.h"
#include "arch/asm/dbi_kprobes.h"

#include "dbi_kdebug.h"
#include "dbi_kprobes_deps.h"
#include "dbi_insn_slots.h"

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#include <linux/config.h>
#endif

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/pagemap.h>

unsigned long sched_addr;
static unsigned long exit_addr;
static unsigned long do_group_exit_addr;
static unsigned long sys_exit_group_addr;
static unsigned long sys_exit_addr;

struct slot_manager sm;

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

DEFINE_SPINLOCK(kretprobe_lock);  /* Protects kretprobe_inst_table */
EXPORT_SYMBOL_GPL(kretprobe_lock);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

atomic_t kprobe_count;
EXPORT_SYMBOL_GPL(kprobe_count);

static void *(*module_alloc)(unsigned long size) = NULL;
static void *(*module_free)(struct module *mod, void *module_region) = NULL;

static void *__wrapper_module_alloc(unsigned long size)
{
    return module_alloc(size);
}

static void *__wrapper_module_free(void *module_region)
{
    return module_free(NULL, module_region);
}
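
/*
 * Slot manager callbacks: instruction slots for probe trampolines are
 * carved out of executable pages obtained through the resolved
 * module_alloc()/module_free() pair above.
 */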
static void *sm_alloc(struct slot_manager *sm)
{
    return __wrapper_module_alloc(PAGE_SIZE);
}

static void sm_free(struct slot_manager *sm, void *ptr)
{
    __wrapper_module_free(ptr);
}

static void init_sm(void)
    sm.slot_size = KPROBES_TRAMP_LEN;
    INIT_HLIST_HEAD(&sm.page_list);

static void exit_sm(void)
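
/*
 * Sanity check for the trampoline handler: if the saved return address
 * is missing or still points at kretprobe_trampoline itself, the
 * kretprobe bookkeeping is corrupted and we panic with whatever context
 * (instance, task, registered kretprobe) is still available.
 */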
void kretprobe_assert(struct kretprobe_instance *ri, unsigned long orig_ret_address, unsigned long trampoline_address)
    if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
        struct task_struct *task;
        panic("kretprobe BUG!: ri = NULL\n");
        panic("kretprobe BUG!: task = NULL\n");
        if (ri->rp == NULL) {
            panic("kretprobe BUG!: ri->rp = NULL\n");
        panic("kretprobe BUG!: Processing kretprobe %p @ %p (%d/%d - %s)\n",
              ri->rp, ri->rp->kp.addr, ri->task->tgid, ri->task->pid, ri->task->comm);

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
    __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
    __get_cpu_var(kprobe_instance) = NULL;
}

/* kprobe_running() will just return the current_kprobe on this CPU */
struct kprobe *kprobe_running(void)
{
    return __get_cpu_var(current_kprobe);
}

void reset_current_kprobe(void)
{
    __get_cpu_var(current_kprobe) = NULL;
}

struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
    return &__get_cpu_var(kprobe_ctlblk);
}

/*
 * This routine is called either:
 * - under the kprobe_mutex - during kprobe_[un]register()
 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
    struct hlist_head *head;
    struct hlist_node *node;

    head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
    swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
        if (p->addr == addr) {

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
    list_for_each_entry_rcu(kp, &p->list, list) {
        if (kp->pre_handler) {
            set_kprobe_instance(kp);
            ret = kp->pre_handler(kp, regs);
        reset_kprobe_instance();

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
    list_for_each_entry_rcu(kp, &p->list, list) {
        if (kp->post_handler) {
            set_kprobe_instance(kp);
            kp->post_handler(kp, regs, flags);
            reset_kprobe_instance();

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
    struct kprobe *cur = __get_cpu_var(kprobe_instance);

    /*
     * if we faulted "during" the execution of a user specified
     * probe handler, invoke just that probe's fault handler
     */
    if (cur && cur->fault_handler) {
        if (cur->fault_handler(cur, regs, trapnr))

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
    struct kprobe *cur = __get_cpu_var(kprobe_instance);

    DBPRINTF ("cur = 0x%p\n", cur);
    DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);
    if (cur && cur->break_handler) {
        if (cur->break_handler(cur, regs))
        reset_kprobe_instance();

/* Walks the list and increments nmissed count for multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
    if (p->pre_handler != aggr_pre_handler) {
        list_for_each_entry_rcu(kp, &p->list, list) {

/* Called with kretprobe_lock held */
struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
    struct hlist_node *node;
    struct kretprobe_instance *ri;

    swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
    if (!alloc_nodes_kretprobe(rp)) {
        swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
EXPORT_SYMBOL_GPL(get_free_rp_inst);

/* Called with kretprobe_lock held */
struct kretprobe_instance *get_free_rp_inst_no_alloc(struct kretprobe *rp)
    struct hlist_node *node;
    struct kretprobe_instance *ri;

    swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {

/* Called with kretprobe_lock held */
struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
    struct hlist_node *node;
    struct kretprobe_instance *ri;

    swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
EXPORT_SYMBOL_GPL(get_used_rp_inst);

/* Called with kretprobe_lock held */
void add_rp_inst(struct kretprobe_instance *ri)
    /*
     * Remove rp inst off the free list -
     * Add it back when probed function returns
     */
    hlist_del(&ri->uflist);

    /* Add rp inst onto table */
    INIT_HLIST_NODE(&ri->hlist);
    hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

    /* Also add this rp inst to the used list. */
    INIT_HLIST_NODE(&ri->uflist);
    hlist_add_head(&ri->uflist, &ri->rp->used_instances);
EXPORT_SYMBOL_GPL(add_rp_inst);

/* Called with kretprobe_lock held */
void recycle_rp_inst(struct kretprobe_instance *ri)
    hlist_del(&ri->hlist);
    /* remove rp inst off the used list */
    hlist_del(&ri->uflist);
    /* put rp inst back onto the free list */
    INIT_HLIST_NODE(&ri->uflist);
    hlist_add_head(&ri->uflist, &ri->rp->free_instances);
EXPORT_SYMBOL_GPL(recycle_rp_inst);

struct hlist_head *kretprobe_inst_table_head(void *hash_key)
{
    return &kretprobe_inst_table[hash_ptr(hash_key, KPROBE_HASH_BITS)];
}
EXPORT_SYMBOL_GPL(kretprobe_inst_table_head);
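
/*
 * Drain the kretprobe's free-instance list, dropping each pre-allocated
 * instance; used when the kretprobe is being torn down.
 */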
void free_rp_inst(struct kretprobe *rp)
    struct kretprobe_instance *ri;

    while ((ri = get_free_rp_inst_no_alloc(rp)) != NULL) {
        hlist_del(&ri->uflist);
EXPORT_SYMBOL_GPL(free_rp_inst);

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
    memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
    memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
    p->ss_addr = old_p->ss_addr;
    p->safe_arm = old_p->safe_arm;
    p->safe_thumb = old_p->safe_thumb;

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
    if (p->break_handler) {
        if (old_p->break_handler) {
        list_add_tail_rcu(&p->list, &old_p->list);
        old_p->break_handler = aggr_break_handler;
        list_add_rcu(&p->list, &old_p->list);

    if (p->post_handler && !old_p->post_handler) {
        old_p->post_handler = aggr_post_handler;

/*
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
inline void dbi_hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new)
    struct hlist_node *next = old->next;

    new->pprev = old->pprev;
    new->next->pprev = &new->next;
    old->pprev = LIST_POISON2;

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
    ap->pre_handler = aggr_pre_handler;
    ap->fault_handler = aggr_fault_handler;
    ap->post_handler = aggr_post_handler;
    if (p->break_handler)
        ap->break_handler = aggr_break_handler;

    INIT_LIST_HEAD(&ap->list);
    list_add_rcu(&p->list, &ap->list);

    dbi_hlist_replace_rcu(&p->hlist, &ap->hlist);

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
    DBPRINTF ("start\n");
    DBPRINTF ("p = %p old_p = %p \n", p, old_p);
    if (old_p->pre_handler == aggr_pre_handler) {
        DBPRINTF ("aggr_pre_handler \n");
        copy_kprobe(old_p, p);
        ret = add_new_kprobe(old_p, p);
        DBPRINTF ("kzalloc\n");
        ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
        ap = kmalloc(sizeof(struct kprobe), GFP_KERNEL);
        memset(ap, 0, sizeof(struct kprobe));
        add_aggr_kprobe(ap, old_p);
        DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
        ret = add_new_kprobe(ap, p);
EXPORT_SYMBOL_GPL(register_aggr_kprobe);
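
/*
 * Undo arch_prepare_kprobe(): give the probe's copied-instruction slot
 * back to the slot manager.
 */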
static void remove_kprobe(struct kprobe *p)
    /* TODO: check boostable for x86 and MIPS */
    free_insn_slot(&sm, p->ainsn.insn);
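
/*
 * Register a kprobe: resolve symbol_name/offset into an address, prepare
 * the instruction slot and hook the probe into kprobe_table. If a probe
 * already exists at that address, it is joined to an aggregate probe via
 * register_aggr_kprobe().
 */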
int dbi_register_kprobe(struct kprobe *p)
    struct kprobe *old_p;

    /*
     * If we have a symbol_name argument look it up,
     * and add it to the address. That way the addr
     * field can either be global or relative to a symbol.
     */
    if (p->symbol_name) {
        p->addr = (kprobe_opcode_t *)swap_ksyms(p->symbol_name);

    DBPRINTF ("p->addr = 0x%p\n", p->addr);
    p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);
    DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);

#ifdef KPROBES_PROFILE
    p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
    p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
#endif
    p->mod_refcounted = 0;

    old_p = get_kprobe(p->addr);
    ret = register_aggr_kprobe(old_p, p);
    atomic_inc(&kprobe_count);

    if ((ret = arch_prepare_kprobe(p, &sm)) != 0)

    DBPRINTF ("before out ret = 0x%x\n", ret);
    INIT_HLIST_NODE(&p->hlist);
    hlist_add_head_rcu(&p->hlist, &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

    DBPRINTF ("out ret = 0x%x\n", ret);
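
/*
 * Remove a kprobe. If it is the only probe at the address, the breakpoint
 * is disarmed and the probe is unhooked from kprobe_table; if it is part
 * of an aggregate probe, only this handler is unlinked from the list.
 */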
void dbi_unregister_kprobe(struct kprobe *p)
    struct kprobe *old_p, *list_p;

    old_p = get_kprobe(p->addr);
    DBPRINTF ("dbi_unregister_kprobe p=%p old_p=%p", p, old_p);
    if (unlikely (!old_p))

    list_for_each_entry_rcu(list_p, &old_p->list, list)
        /* kprobe p is a valid probe */

    DBPRINTF ("dbi_unregister_kprobe valid_p");
    if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
        (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
        /* Only probe on the hash list */
        arch_disarm_kprobe(p);
        hlist_del_rcu(&old_p->hlist);

        list_del_rcu(&p->list);

    list_del_rcu(&p->list);
    DBPRINTF ("dbi_unregister_kprobe cleanup_p=%d", cleanup_p);

    if (p->break_handler)
        old_p->break_handler = NULL;
    if (p->post_handler) {
        list_for_each_entry_rcu(list_p, &old_p->list, list) {
            if (list_p->post_handler) {

    old_p->post_handler = NULL;
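
/*
 * A jprobe is a kprobe whose pre-handler diverts execution to a stub with
 * the same signature as the probed function; the setjmp/longjmp handlers
 * installed here take care of saving and restoring the register state.
 */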
int dbi_register_jprobe(struct jprobe *jp)
{
    /* Todo: Verify probepoint is a function entry point */
    jp->kp.pre_handler = setjmp_pre_handler;
    jp->kp.break_handler = longjmp_break_handler;

    return dbi_register_kprobe(&jp->kp);
}

void dbi_unregister_jprobe(struct jprobe *jp)
{
    dbi_unregister_kprobe(&jp->kp);
}

/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
    struct kretprobe *rp = container_of(p, struct kretprobe, kp);
    struct kretprobe_instance *ri;
    unsigned long flags = 0;

    /* TODO: consider to only swap the RA after the last pre_handler fired */
    spin_lock_irqsave(&kretprobe_lock, flags);

    /* TODO: test - remove retprobe after func entry but before its exit */
    if ((ri = get_free_rp_inst(rp)) != NULL) {
        if (rp->entry_handler) {
            rp->entry_handler(ri, regs);
        arch_prepare_kretprobe(ri, regs);

    spin_unlock_irqrestore(&kretprobe_lock, flags);
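
/*
 * Trampoline handler: entered when a probed function returns through
 * kretprobe_trampoline. It runs the return handlers registered for the
 * current task's pending instances and restores the original return
 * address.
 */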
int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
    struct kretprobe_instance *ri = NULL;
    struct hlist_head *head;
    struct hlist_node *node, *tmp;
    unsigned long flags, orig_ret_address = 0;
    unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
    struct kprobe_ctlblk *kcb;

    kcb = get_kprobe_ctlblk();

    spin_lock_irqsave(&kretprobe_lock, flags);

    /*
     * We are using different hash keys (current and mm) for finding kernel
     * space and user space probes. Kernel space probes can change mm field in
     * task_struct. User space probes can be shared between threads of one
     * process so they have different current but same mm.
     */
    head = kretprobe_inst_table_head(current);

    regs->XREG(cs) = __KERNEL_CS | get_kernel_rpl();
    regs->EREG(ip) = trampoline_address;
    regs->ORIG_EAX_REG = 0xffffffff;

    /*
     * It is possible to have multiple instances associated with a given
     * task either because multiple functions in the call path
     * have a return probe installed on them, and/or more than one
     * return probe was registered for a target function.
     *
     * We can handle this because:
     * - instances are always inserted at the head of the list
     * - when multiple return probes are registered for the same
     *   function, the first instance's ret_addr will point to the
     *   real return address, and all the rest will point to
     *   kretprobe_trampoline
     */
    swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
        if (ri->task != current)
            /* another task is sharing our hash bucket */

        if (ri->rp && ri->rp->handler) {
            __get_cpu_var(current_kprobe) = &ri->rp->kp;
            get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
            ri->rp->handler(ri, regs);
            __get_cpu_var(current_kprobe) = NULL;

        orig_ret_address = (unsigned long)ri->ret_addr;

        if (orig_ret_address != trampoline_address)
            /*
             * This is the real return address. Any other
             * instances associated with this task are for
             * other calls deeper on the call stack
             */

    kretprobe_assert(ri, orig_ret_address, trampoline_address);

    if (kcb->kprobe_status == KPROBE_REENTER) {
        restore_previous_kprobe(kcb);
        reset_current_kprobe();

    spin_unlock_irqrestore(&kretprobe_lock, flags);
    preempt_enable_no_resched();

    /*
     * By returning a non-zero value, we are telling
     * kprobe_handler() that we don't want the post_handler
     * to run (and have re-enabled preemption)
     */
    return (int)orig_ret_address;

#define SCHED_RP_NR 200
#define COMMON_RP_NR 10
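
/*
 * Called from the probe-hit path when a kretprobe runs out of free
 * instances: top up the free list with GFP_ATOMIC allocations and raise
 * maxactive accordingly (the __switch_to probe gets a larger batch).
 */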
int alloc_nodes_kretprobe(struct kretprobe *rp)
    struct kretprobe_instance *inst;

    DBPRINTF("Alloc additional mem for retprobes");

    if ((unsigned long)rp->kp.addr == sched_addr) {
        rp->maxactive += SCHED_RP_NR; //max (100, 2 * NR_CPUS);
        alloc_nodes = SCHED_RP_NR;
#if 1//def CONFIG_PREEMPT
        rp->maxactive += max (COMMON_RP_NR, 2 * NR_CPUS);
        rp->maxactive += NR_CPUS;
        alloc_nodes = COMMON_RP_NR;

    for (i = 0; i < alloc_nodes; i++) {
        inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);

        INIT_HLIST_NODE(&inst->uflist);
        hlist_add_head(&inst->uflist, &rp->free_instances);

    DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
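
/*
 * Register a return probe: the entry kprobe at rp->kp.addr gets
 * pre_handler_kretprobe() as its pre-handler and rp->maxactive instances
 * are pre-allocated (the exit paths resolved in init_module_deps() get
 * fixed budgets and no pre-handler).
 *
 * Typical usage from a client module (a minimal sketch; the probed
 * symbol, handler body and maxactive value are only an example):
 *
 *     static int my_ret_handler(struct kretprobe_instance *ri,
 *                               struct pt_regs *regs)
 *     {
 *         pr_info("return to %p\n", ri->ret_addr);
 *         return 0;
 *     }
 *
 *     static struct kretprobe my_rp = {
 *         .kp.symbol_name = "do_fork",
 *         .handler        = my_ret_handler,
 *         .maxactive      = 16,
 *     };
 *
 *     ...
 *     dbi_register_kretprobe(&my_rp);
 *     ...
 *     dbi_unregister_kretprobe(&my_rp);
 */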
int dbi_register_kretprobe(struct kretprobe *rp)
    struct kretprobe_instance *inst;

    rp->kp.pre_handler = pre_handler_kretprobe;
    rp->kp.post_handler = NULL;
    rp->kp.fault_handler = NULL;
    rp->kp.break_handler = NULL;

    /* Pre-allocate memory for max kretprobe instances */
    if ((unsigned long)rp->kp.addr == exit_addr) {
        rp->kp.pre_handler = NULL; //not needed for do_exit
    } else if ((unsigned long)rp->kp.addr == do_group_exit_addr) {
        rp->kp.pre_handler = NULL;
    } else if ((unsigned long)rp->kp.addr == sys_exit_group_addr) {
        rp->kp.pre_handler = NULL;
    } else if ((unsigned long)rp->kp.addr == sys_exit_addr) {
        rp->kp.pre_handler = NULL;
    } else if (rp->maxactive <= 0) {
#if 1//def CONFIG_PREEMPT
        rp->maxactive = max (COMMON_RP_NR, 2 * NR_CPUS);
        rp->maxactive = NR_CPUS;

    INIT_HLIST_HEAD(&rp->used_instances);
    INIT_HLIST_HEAD(&rp->free_instances);
    for (i = 0; i < rp->maxactive; i++) {
        inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_KERNEL);

        INIT_HLIST_NODE(&inst->uflist);
        hlist_add_head(&inst->uflist, &rp->free_instances);

    DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));

    /* Establish function entry probe point */
    if ((ret = dbi_register_kprobe(&rp->kp)) != 0)

    DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));

static int dbi_disarm_krp_inst(struct kretprobe_instance *ri);
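
/*
 * First phase of kretprobe removal: unregister the entry kprobe and
 * disarm every instance still waiting for its function to return, so
 * that no task returns into the trampoline afterwards.
 */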
static void dbi_unregister_kretprobe_top(struct kretprobe *rp)
    struct kretprobe_instance *ri;
    struct hlist_node *node;

    dbi_unregister_kprobe(&rp->kp);

    spin_lock_irqsave(&kretprobe_lock, flags);
    swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
        if (dbi_disarm_krp_inst(ri) != 0) {
            printk("%s (%d/%d): cannot disarm krp instance (%08lx)\n",
                   ri->task->comm, ri->task->tgid, ri->task->pid,
                   (unsigned long)rp->kp.addr);
    spin_unlock_irqrestore(&kretprobe_lock, flags);

static void dbi_unregister_kretprobe_bottom(struct kretprobe *rp)
    struct kretprobe_instance *ri;

    spin_lock_irqsave(&kretprobe_lock, flags);
    while ((ri = get_used_rp_inst(rp)) != NULL) {
    spin_unlock_irqrestore(&kretprobe_lock, flags);

void dbi_unregister_kretprobe(struct kretprobe *rp)
{
    dbi_unregister_kretprobe_top(rp);
    dbi_unregister_kretprobe_bottom(rp);
}
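
/*
 * Attach a second kretprobe to the same address: the copy is registered
 * as part of an aggregate probe on top of the existing entry kprobe.
 */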
struct kretprobe *clone_kretprobe(struct kretprobe *rp)
    struct kprobe *old_p;
    struct kretprobe *clone = NULL;

    clone = kmalloc(sizeof(struct kretprobe), GFP_KERNEL);
        DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);

    memcpy(clone, rp, sizeof(struct kretprobe));
    clone->kp.pre_handler = pre_handler_kretprobe;
    clone->kp.post_handler = NULL;
    clone->kp.fault_handler = NULL;
    clone->kp.break_handler = NULL;
    old_p = get_kprobe(rp->kp.addr);
    ret = register_aggr_kprobe(old_p, &clone->kp);
    atomic_inc(&kprobe_count);
EXPORT_SYMBOL_GPL(clone_kretprobe);

static inline void rm_task_trampoline(struct task_struct *p, struct kretprobe_instance *ri)
{
    arch_set_task_pc(p, (unsigned long)ri->ret_addr);
}
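
/*
 * Undo the return-address hijacking for one pending instance: if the task
 * is stopped right at the trampoline, rewrite its saved PC; otherwise scan
 * the recorded stack area for the trampoline address and put the original
 * return address back. Returns -ENOENT when the trampoline cannot be found.
 */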
static int dbi_disarm_krp_inst(struct kretprobe_instance *ri)
    unsigned long *tramp = (unsigned long *)&kretprobe_trampoline;
    unsigned long *sp = ri->sp;
    unsigned long *found = NULL;
    int retval = -ENOENT;

    unsigned long pc = arch_get_task_pc(ri->task);

    printk("---> [%d] %s (%d/%d): pc = %08lx, ra = %08lx, tramp= %08lx (%08lx)\n",
           ri->task->comm, ri->task->tgid, ri->task->pid,
           pc, (long unsigned int)ri->ret_addr,
           (long unsigned int)tramp,
           (long unsigned int)(ri->rp ? ri->rp->kp.addr : NULL));

    /* __switch_to retprobe handling */
    if (pc == (unsigned long)tramp) {
        rm_task_trampoline(ri->task, ri);

    while (sp > ri->sp - RETPROBE_STACK_DEPTH) {
        if (*sp == (unsigned long)tramp) {

    printk("---> [%d] %s (%d/%d): tramp (%08lx) found at %08lx (%08lx /%+d) - %p\n",
           ri->task->comm, ri->task->tgid, ri->task->pid,
           (long unsigned int)tramp,
           (long unsigned int)found, (long unsigned int)ri->sp,
           found - ri->sp, ri->rp ? ri->rp->kp.addr : NULL);
    *found = (unsigned long)ri->ret_addr;

    printk("---> [%d] %s (%d/%d): tramp (%08lx) NOT found at sp = %08lx - %p\n",
           ri->task->comm, ri->task->tgid, ri->task->pid,
           (long unsigned int)tramp,
           (long unsigned int)ri->sp, ri->rp ? ri->rp->kp.addr : NULL);
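
/*
 * Resolve the kernel symbols this module relies on at runtime (the
 * scheduler switch and the various exit paths) and initialise the
 * remaining, including arch-specific, dependencies.
 */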
static int init_module_deps(void)
    sched_addr = swap_ksyms("__switch_to");
    exit_addr = swap_ksyms("do_exit");
    sys_exit_group_addr = swap_ksyms("sys_exit_group");
    do_group_exit_addr = swap_ksyms("do_group_exit");
    sys_exit_addr = swap_ksyms("sys_exit");

    if (sched_addr == 0 ||
        exit_addr == 0 ||
        sys_exit_group_addr == 0 ||
        do_group_exit_addr == 0 ||
        sys_exit_addr == 0) {

    ret = init_module_dependencies();

    return arch_init_module_deps();
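
/*
 * Module init: look up module_alloc()/module_free() (they are not
 * exported), initialise the probe hash tables and module dependencies,
 * then run the arch-specific setup.
 */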
static int __init init_kprobes(void)
    module_alloc = (void *)swap_ksyms("module_alloc");
        printk("module_alloc is not found! Oops.\n");

    module_free = (void *)swap_ksyms("module_free");
        printk("module_free is not found! Oops.\n");

    /* FIXME allocate the probe table, currently defined statically */
    /* initialize all list heads */
    for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
        INIT_HLIST_HEAD(&kprobe_table[i]);
        INIT_HLIST_HEAD(&kretprobe_inst_table[i]);

    atomic_set(&kprobe_count, 0);

    err = init_module_deps();

    err = arch_init_kprobes();
    DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);

static void __exit exit_kprobes(void)
    arch_exit_kprobes();

module_init(init_kprobes);
module_exit(exit_kprobes);

EXPORT_SYMBOL_GPL(dbi_register_kprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_kprobe);
EXPORT_SYMBOL_GPL(dbi_register_jprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_jprobe);
EXPORT_SYMBOL_GPL(dbi_jprobe_return);
EXPORT_SYMBOL_GPL(dbi_register_kretprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_kretprobe);

MODULE_LICENSE("Dual BSD/GPL");