2 * Dynamic Binary Instrumentation Module based on KProbes
3 * modules/kprobe/dbi_uprobes.h
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Copyright (C) Samsung Electronics, 2006-2010
21 * 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
22 * Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
23 * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
28 #include "swap_uprobes.h"
29 #include "dbi_kdebug.h"
31 #include <asm/swap_uprobes.h>
33 #include <linux/hash.h>
34 #include <linux/mempolicy.h>
35 #include <linux/module.h>
36 #include <dbi_insn_slots.h>
/* Hash-table geometry: 1 << 10 = 1024 buckets, indexed via hash_ptr(). */
39 UPROBE_HASH_BITS = 10,
40 UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
/* Maps out-of-line instruction-slot addresses back to their probes. */
43 struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
/* Main table: probed user-space address -> registered kprobe. */
44 struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
/* Pages backing the out-of-line instruction slots (see free_insn_slot). */
45 struct hlist_head uprobe_insn_pages;
47 DEFINE_SPINLOCK(uretprobe_lock); /* Protects uretprobe_inst_table */
/* Live uretprobe instances, bucketed per-mm (uretprobe_inst_table_head). */
48 static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
/* Compile-time switch for the debug dump helpers below (currently off). */
50 #define DEBUG_PRINT_HASH_TABLE 0
/*
 * Debug-only dump helpers (compiled out: DEBUG_PRINT_HASH_TABLE == 0).
 * Each walks one hash table under RCU and printk's every entry.
 */
52 #if DEBUG_PRINT_HASH_TABLE
53 void print_kprobe_hash_table(void)
56 struct hlist_head *head;
57 struct hlist_node *node;
61 for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
62 head = &kprobe_table[i];
63 hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
64 printk("####### find K tgid=%u, addr=%x\n",
/* Same walk over the kretprobe instance table. */
70 void print_kretprobe_hash_table(void)
73 struct hlist_head *head;
74 struct hlist_node *node;
78 for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
79 head = &kretprobe_inst_table[i];
80 hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
81 printk("####### find KR tgid=%u, addr=%x\n",
/* Same walk over the uprobe instruction-slot table. */
87 void print_uprobe_hash_table(void)
90 struct hlist_head *head;
91 struct hlist_node *node;
95 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
96 head = &uprobe_insn_slot_table[i];
97 hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
98 printk("####### find U tgid=%u, addr=%x\n",
106 * Keep all fields in the uprobe consistent
/*
 * Copy the probe payload from old_p into p: the saved original opcode,
 * the arch-decoded instruction, the owning tgid, the single-step
 * address, and the ARM/Thumb safety flags.  Used when an aggregate
 * probe replaces (or is joined by) an existing probe at the same
 * address, so both describe the same underlying instruction.
 */
108 static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
110 memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
111 memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
112 p->tgid = old_p->tgid;
113 p->ss_addr = old_p->ss_addr;
115 p->safe_arm = old_p->safe_arm;
116 p->safe_thumb = old_p->safe_thumb;
121 * Aggregate handlers for multiple uprobes support - these handlers
122 * take care of invoking the individual uprobe handlers on p->list
/* Run each chained probe's pre_handler; ret is derived from the calls
 * (full logic elided in this fragment). */
124 static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
129 list_for_each_entry_rcu(kp, &p->list, list) {
130 if (kp->pre_handler) {
131 ret = kp->pre_handler(kp, regs);
/* Run each chained probe's post_handler in turn. */
141 static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
145 list_for_each_entry_rcu(kp, &p->list, list) {
146 if (kp->post_handler) {
147 kp->post_handler(kp, regs, flags);
/* Fault/break dispatchers for the aggregate probe (bodies elided). */
152 static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
157 static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
163 * Add the new probe to old_p->list. Fail if this is the
164 * second ujprobe at the address - two ujprobes can't coexist
/* Chain p under the aggregate probe old_p.  A probe with a
 * break_handler (i.e. a ujprobe) goes at the tail and installs the
 * aggregate break handler; only one such probe is allowed. */
166 static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
168 if (p->break_handler) {
169 if (old_p->break_handler) {
173 list_add_tail_rcu(&p->list, &old_p->list);
174 old_p->break_handler = aggr_break_uhandler;
176 list_add_rcu (&p->list, &old_p->list);
/* First chained probe with a post_handler turns on aggregate post. */
179 if (p->post_handler && !old_p->post_handler) {
180 old_p->post_handler = aggr_post_uhandler;
187 * Fill in the required fields of the "manager uprobe". Replace the
188 * earlier uprobe in the hlist with the manager uprobe
/* ap becomes the "manager" probe: it takes over the hash-table slot
 * of p and dispatches to every probe on its ->list. */
190 static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
195 ap->pre_handler = aggr_pre_uhandler;
196 ap->fault_handler = aggr_fault_uhandler;
/* Install aggregate post/break handlers only if p needs them. */
198 if (p->post_handler) {
199 ap->post_handler = aggr_post_uhandler;
202 if (p->break_handler) {
203 ap->break_handler = aggr_break_uhandler;
206 INIT_LIST_HEAD(&ap->list);
207 list_add_rcu(&p->list, &ap->list);
/* Swap ap into the bucket where p used to live (RCU-safe). */
209 hlist_replace_rcu(&p->hlist, &ap->hlist);
213 * This is the second or subsequent uprobe at the address - handle
/* If old_p is already an aggregate, just chain p onto it; otherwise
 * allocate a fresh manager uprobe, promote old_p under it, then add p. */
216 static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
221 if (old_p->pre_handler == aggr_pre_uhandler) {
222 copy_uprobe(old_p, p);
223 ret = add_new_uprobe(old_p, p);
225 struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
/* NOTE(review): kzalloc result is used below; the NULL check is in
 * the elided lines - confirm against the full source. */
230 uap->task = kp2up(p)->task;
232 add_aggr_uprobe(ap, old_p);
234 ret = add_new_uprobe(ap, p);
/*
 * Arm the probe: write the breakpoint instruction into the target
 * task's address space at kp.addr.  Panics if the write fails, since
 * a half-armed probe would be unrecoverable.
 */
240 static void arm_uprobe(struct uprobe *p)
242 kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
243 int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
244 &insn, sizeof(insn));
246 panic("arm_uprobe: failed to write memory "
247 "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
/*
 * Disarm the probe: restore the saved original opcode at kp.addr in
 * the target task's address space.  Panics on write failure (the
 * target would otherwise keep trapping on a stale breakpoint).
 */
251 void disarm_uprobe(struct uprobe *p)
253 int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
254 &p->kp.opcode, sizeof(p->kp.opcode));
256 panic("disarm_uprobe: failed to write memory "
257 "tgid=%u, addr=%p!\n", p->task->tgid, p->kp.addr);
260 EXPORT_SYMBOL_GPL(disarm_uprobe);
/* One-time initialization of each hash table's bucket heads. */
262 static void init_uprobes_insn_slots(void)
265 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
266 INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
270 static void init_uprobe_table(void)
273 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
274 INIT_HLIST_HEAD(&uprobe_table[i]);
278 static void init_uretprobe_inst_table(void)
281 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
282 INIT_HLIST_HEAD (&uretprobe_inst_table[i]);
/*
 * Look up the probe registered at user address @addr for process
 * @tgid.  Walks the uprobe_table bucket under RCU; both the address
 * and the owning tgid must match (the same address may be probed in
 * several processes).
 */
286 struct kprobe *get_ukprobe(void *addr, pid_t tgid)
288 struct hlist_head *head;
289 struct hlist_node *node;
292 head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
293 hlist_for_each_entry_rcu(p, node, head, hlist) {
294 if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
/*
 * Index the probe by its out-of-line instruction slot(s) so the slot
 * address can be mapped back to the probe (used by the trampoline
 * handlers).  On ARM both the ARM and Thumb slots are indexed.
 */
302 static void add_uprobe_table(struct kprobe *p)
305 INIT_HLIST_NODE(&p->is_hlist_arm);
306 hlist_add_head_rcu(&p->is_hlist_arm, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn_arm, UPROBE_HASH_BITS)]);
307 INIT_HLIST_NODE(&p->is_hlist_thumb);
308 hlist_add_head_rcu(&p->is_hlist_thumb, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn_thumb, UPROBE_HASH_BITS)]);
309 #else /* CONFIG_ARM */
310 INIT_HLIST_NODE(&p->is_hlist);
311 hlist_add_head_rcu(&p->is_hlist, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
312 #endif /* CONFIG_ARM */
/*
 * Reverse lookup: instruction-slot address -> probe, ARM variant.
 * NOTE(review): the bucket was hashed by ainsn.insn_arm in
 * add_uprobe_table() but the comparison below uses ainsn.insn - under
 * CONFIG_ARM these may be different fields; verify against the full
 * source (likely should compare ainsn.insn_arm here).
 */
316 static struct kprobe *get_kprobe_by_insn_slot_arm(kprobe_opcode_t *addr, pid_t tgid)
318 struct hlist_head *head;
319 struct hlist_node *node;
320 struct kprobe *p, *ret = NULL;
322 /* TODO: test - two processes invokes instrumented function */
323 head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
324 hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
325 if (p->ainsn.insn == addr && tgid == p->tgid) {
/*
 * Reverse lookup, Thumb variant.
 * NOTE(review): same concern as above - bucket hashed by
 * ainsn.insn_thumb, comparison uses ainsn.insn; confirm.
 */
334 static struct kprobe *get_kprobe_by_insn_slot_thumb(kprobe_opcode_t *addr, pid_t tgid)
336 struct hlist_head *head;
337 struct hlist_node *node;
338 struct kprobe *p, *ret = NULL;
340 /* TODO: test - two processes invokes instrumented function */
341 head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
342 hlist_for_each_entry_rcu(p, node, head, is_hlist_thumb) {
343 if (p->ainsn.insn == addr && tgid == p->tgid) {
/*
 * ARM build: dispatch on the CPU mode at trap time.  The trap address
 * sits past the slot start, so subtract the mode-specific trampoline
 * break offset before the lookup (0x1a bytes for Thumb).
 */
352 struct kprobe *get_kprobe_by_insn_slot(kprobe_opcode_t *addr, pid_t tgid, struct pt_regs *regs)
354 struct kprobe *p = NULL;
356 if (!thumb_mode(regs)) {
357 p = get_kprobe_by_insn_slot_arm(addr - UPROBES_TRAMP_RET_BREAK_IDX, tgid);
359 p = get_kprobe_by_insn_slot_thumb((kprobe_opcode_t *)((unsigned long)addr - 0x1a), tgid);
364 #else /* CONFIG_ARM */
/* Non-ARM build: single slot per probe, direct bucket walk. */
365 struct kprobe *get_kprobe_by_insn_slot(void *addr, int tgid, struct task_struct *ctask)
367 struct hlist_head *head;
368 struct hlist_node *node;
369 struct kprobe *p, *ret = NULL;
371 /* TODO: test - two processes invokes instrumented function */
372 head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
373 hlist_for_each_entry_rcu(p, node, head, is_hlist) {
374 if (p->ainsn.insn == addr && tgid == p->tgid) {
382 #endif /* CONFIG_ARM */
/*
 * Release the out-of-line instruction slot(s) owned by @up back to
 * uprobe_insn_pages.  A zero tgid means the probe was never attached
 * to a process - treated as a fatal logic error.
 */
385 static void remove_uprobe(struct uprobe *up)
387 struct kprobe *p = &up->kp;
388 struct task_struct *task = up->task;
391 panic("remove_uprobe for tgid == 0!!!");
395 free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn_arm);
396 free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn_thumb);
397 #else /* CONFIG_ARM */
398 free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn);
399 #endif /* CONFIG_ARM */
/* Bucket selector for uretprobe instances; keyed by the task's mm. */
402 static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
404 return &uretprobe_inst_table[hash_ptr (hash_key, UPROBE_HASH_BITS)];
407 /* Called with uretprobe_lock held */
/* Move a uretprobe instance from its rp's free list to both the
 * per-mm instance table and the rp's used list. */
408 static void add_urp_inst(struct uretprobe_instance *ri)
411 * Remove rp inst off the free list -
412 * Add it back when probed function returns
414 hlist_del(&ri->uflist);
416 /* Add rp inst onto table */
417 INIT_HLIST_NODE(&ri->hlist);
418 hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));
420 /* Also add this rp inst to the used list. */
421 INIT_HLIST_NODE(&ri->uflist);
422 hlist_add_head(&ri->uflist, &ri->rp->used_instances);
425 /* Called with uretprobe_lock held */
/* Inverse of add_urp_inst: pull the instance out of the table and the
 * used list and return it to the rp's free list for reuse. */
426 static void recycle_urp_inst(struct uretprobe_instance *ri)
429 hlist_del(&ri->hlist);
430 /* remove rp inst off the used list */
431 hlist_del(&ri->uflist);
432 /* put rp inst back onto the free list */
433 INIT_HLIST_NODE(&ri->uflist);
434 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
438 /* Called with uretprobe_lock held */
/* Return the first entry of rp->used_instances, or NULL if empty
 * (tail of function elided in this fragment). */
439 static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
441 struct hlist_node *node;
442 struct uretprobe_instance *ri;
444 hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
451 /* Called with uretprobe_lock held */
/* Return the first entry of rp->free_instances without allocating. */
452 struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
454 struct hlist_node *node;
455 struct uretprobe_instance *ri;
457 hlist_for_each_entry (ri, node, &rp->free_instances, uflist) {
464 /* Called with uretprobe_lock held */
/* Drain rp->free_instances, unlinking (and, in elided lines,
 * presumably freeing) every pre-allocated instance. */
465 static void free_urp_inst(struct uretprobe *rp)
467 struct uretprobe_instance *ri;
468 while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
469 hlist_del(&ri->uflist);
474 #define COMMON_URP_NR 10
476 static int alloc_nodes_uretprobe(struct uretprobe *rp)
479 struct uretprobe_instance *inst;
482 #if 1//def CONFIG_PREEMPT
483 rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
485 rp->maxacpptive += NR_CPUS;
487 alloc_nodes = COMMON_URP_NR;
489 for (i = 0; i < alloc_nodes; ++i) {
490 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
495 INIT_HLIST_NODE(&inst->uflist);
496 hlist_add_head(&inst->uflist, &rp->free_instances);
502 /* Called with uretprobe_lock held */
/* Grab a free instance; if the free list is empty, try to grow it via
 * alloc_nodes_uretprobe() and retry the walk once. */
503 static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
505 struct hlist_node *node;
506 struct uretprobe_instance *ri;
508 hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
512 if (!alloc_nodes_uretprobe(rp)) {
513 hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
520 // ===================================================================
/*
 * Register a user-space probe.  If another probe already exists at the
 * same (addr, tgid) it is joined via register_aggr_uprobe(); otherwise
 * the arch layer prepares the instruction slot and the probe is hashed
 * into uprobe_table.  Returns 0 on success, negative errno otherwise.
 */
522 int dbi_register_uprobe(struct uprobe *up, int atomic)
525 struct kprobe *p, *old_p;
532 DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
534 // thumb address = address-1;
535 #if defined(CONFIG_ARM)
536 // TODO: must be corrected in 'bundle'
/* Clear the Thumb bit so the stored address is instruction-aligned. */
537 if ((unsigned long) p->addr & 0x01) {
538 p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
542 p->mod_refcounted = 0;
544 INIT_LIST_HEAD(&p->list);
545 #ifdef KPROBES_PROFILE
546 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
547 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
551 // get the first item
552 old_p = get_ukprobe(p->addr, p->tgid);
/* Existing probe at this address: inherit safety flags and aggregate. */
555 p->safe_arm = old_p->safe_arm;
556 p->safe_thumb = old_p->safe_thumb;
558 ret = register_aggr_uprobe(old_p, p);
560 // atomic_inc(&kprobe_count);
563 DBPRINTF("goto out\n", ret);
/* Fresh probe: let the arch code decode/copy the instruction. */
567 ret = arch_prepare_uprobe(up, atomic);
569 DBPRINTF("goto out\n", ret);
573 DBPRINTF ("before out ret = 0x%x\n", ret);
575 // TODO: add uprobe (must be in function)
576 INIT_HLIST_NODE(&p->hlist);
577 hlist_add_head_rcu(&p->hlist, &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
582 DBPRINTF("out ret = 0x%x\n", ret);
/*
 * Unregister a user-space probe.  Handles both the simple case (the
 * only probe at the address - remove it from the hash list) and the
 * aggregate case (unchain p and clear the aggregate break/post
 * handlers when no remaining chained probe needs them).
 */
586 void dbi_unregister_uprobe(struct uprobe *up, int atomic)
588 struct kprobe *p, *old_p, *list_p;
592 old_p = get_ukprobe(p->addr, p->tgid);
593 if (unlikely(!old_p)) {
/* Verify p really is chained under the aggregate before touching it. */
598 list_for_each_entry_rcu(list_p, &old_p->list, list) {
600 /* uprobe p is a valid probe */
/* p is the sole probe (or the last one under an aggregate): the hash
 * list entry itself can be removed. */
609 if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
610 (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
611 /* Only probe on the hash list */
613 hlist_del_rcu(&old_p->hlist);
616 list_del_rcu(&p->list);
622 list_del_rcu(&p->list);
/* Aggregate survives: drop handlers p contributed if now unused. */
632 if (p->break_handler) {
633 old_p->break_handler = NULL;
636 if (p->post_handler) {
637 list_for_each_entry_rcu (list_p, &old_p->list, list) {
638 if (list_p->post_handler) {
644 if (cleanup_p == 0) {
645 old_p->post_handler = NULL;
/*
 * Register a jumper probe: installs the setjmp/longjmp handler pair on
 * the embedded uprobe and delegates to dbi_register_uprobe().
 */
651 int dbi_register_ujprobe(struct ujprobe *jp, int atomic)
655 /* Todo: Verify probepoint is a function entry point */
656 jp->up.kp.pre_handler = setjmp_upre_handler;
657 jp->up.kp.break_handler = longjmp_break_uhandler;
659 ret = dbi_register_uprobe(&jp->up, atomic);
/*
 * Unregister a jumper probe and drop its instruction-slot hash links.
 * hlist_unhashed() guards against probes that were never installed.
 */
664 void dbi_unregister_ujprobe(struct ujprobe *jp, int atomic)
666 dbi_unregister_uprobe(&jp->up, atomic);
668 * Here is an attempt to unregister even those probes that have not been
669 * installed (hence not added to the hlist).
670 * So if we try to delete them from the hlist we will get NULL pointer
671 * dereference error. That is why we check whether this node
672 * really belongs to the hlist.
675 if (!(hlist_unhashed(&jp->up.kp.is_hlist_arm))) {
676 hlist_del_rcu(&jp->up.kp.is_hlist_arm);
678 if (!(hlist_unhashed(&jp->up.kp.is_hlist_thumb))) {
679 hlist_del_rcu(&jp->up.kp.is_hlist_thumb);
681 #else /* CONFIG_ARM */
682 if (!(hlist_unhashed(&jp->up.kp.is_hlist))) {
683 hlist_del_rcu(&jp->up.kp.is_hlist);
685 #endif /* CONFIG_ARM */
/*
 * Return-trampoline hit: walk this mm's instance bucket, fire each
 * matching uretprobe handler, recycle the instances, and restore the
 * real return address into the regs.  Instances for other tasks
 * sharing the bucket are skipped.
 */
688 int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
690 struct uretprobe_instance *ri = NULL;
691 struct hlist_head *head;
692 struct hlist_node *node, *tmp;
693 unsigned long flags, tramp_addr, orig_ret_addr = 0;
695 tramp_addr = arch_get_trampoline_addr(p, regs);
696 spin_lock_irqsave(&uretprobe_lock, flags);
698 head = uretprobe_inst_table_head(current->mm);
701 * It is possible to have multiple instances associated with a given
702 * task either because an multiple functions in the call path
703 * have a return probe installed on them, and/or more then one
704 * return probe was registered for a target function.
706 * We can handle this because:
707 * - instances are always inserted at the head of the list
708 * - when multiple return probes are registered for the same
709 * function, the first instance's ret_addr will point to the
710 * real return address, and all the rest will point to
711 * uretprobe_trampoline
713 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
714 if (ri->task != current) {
715 /* another task is sharing our hash bucket */
719 if (ri->rp && ri->rp->handler) {
720 ri->rp->handler(ri, regs, ri->rp->priv_arg);
723 orig_ret_addr = (unsigned long)ri->ret_addr;
724 recycle_urp_inst(ri);
726 if (orig_ret_addr != tramp_addr) {
728 * This is the real return address. Any other
729 * instances associated with this task are for
730 * other calls deeper on the call stack
736 spin_unlock_irqrestore(&uretprobe_lock, flags);
737 arch_set_orig_ret_addr(orig_ret_addr, regs);
/*
 * Entry-side handler for a uretprobe: grab a free instance (growing
 * the pool if needed) and let the arch code swap the return address
 * for the trampoline.  Runs under uretprobe_lock.
 */
742 static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
744 struct uprobe *up = container_of(p, struct uprobe, kp);
745 struct uretprobe *rp = container_of(up, struct uretprobe, up);
746 struct uretprobe_instance *ri;
749 /* TODO: consider to only swap the RA after the last pre_handler fired */
750 spin_lock_irqsave(&uretprobe_lock, flags);
752 /* TODO: test - remove retprobe after func entry but before its exit */
753 if ((ri = get_free_urp_inst(rp)) != NULL) {
757 arch_prepare_uretprobe_hl(ri, regs);
764 spin_unlock_irqrestore(&uretprobe_lock, flags);
/*
 * Register a return probe: wire up the entry handler, pre-allocate
 * rp->maxactive instances onto the free list, then install the
 * function-entry uprobe.  Returns 0 on success, negative errno on
 * failure (allocation-failure path elided in this fragment).
 */
769 int dbi_register_uretprobe(struct uretprobe *rp, int atomic)
772 struct uretprobe_instance *inst;
774 DBPRINTF ("START\n");
776 rp->up.kp.pre_handler = pre_handler_uretprobe;
777 rp->up.kp.post_handler = NULL;
778 rp->up.kp.fault_handler = NULL;
779 rp->up.kp.break_handler = NULL;
781 /* Pre-allocate memory for max kretprobe instances */
782 if (rp->maxactive <= 0) {
783 #if 1//def CONFIG_PREEMPT
784 rp->maxactive = max(10, 2 * NR_CPUS);
786 rp->maxactive = NR_CPUS;
790 INIT_HLIST_HEAD(&rp->used_instances);
791 INIT_HLIST_HEAD(&rp->free_instances);
793 for (i = 0; i < rp->maxactive; i++) {
794 inst = kmalloc(sizeof(*inst), GFP_KERNEL);
801 INIT_HLIST_NODE(&inst->uflist);
802 hlist_add_head(&inst->uflist, &rp->free_instances);
807 /* Establish function entry probe point */
808 ret = dbi_register_uprobe(&rp->up, atomic);
/*
 * Undo the return-address hijack for one uretprobe instance: locate
 * the trampoline address either on the target task's stack (scanning
 * the top RETPROBE_STACK_DEPTH words) or in the link register, and
 * write the saved original return address back.  Bit 0 of ri->sp
 * encodes Thumb mode, which changes the trampoline offset.
 */
818 int dbi_disarm_urp_inst(struct uretprobe_instance *ri, struct task_struct *rm_task)
820 struct task_struct *task = rm_task ? rm_task : ri->task;
821 kprobe_opcode_t *tramp;
822 kprobe_opcode_t *sp = (kprobe_opcode_t *)((long)ri->sp & ~1);
823 kprobe_opcode_t *stack = sp - RETPROBE_STACK_DEPTH + 1;
824 kprobe_opcode_t *found = NULL;
825 kprobe_opcode_t *buf[RETPROBE_STACK_DEPTH];
828 /* Understand function mode */
829 if ((long)ri->sp & 1) {
830 tramp = (kprobe_opcode_t *)
831 ((unsigned long)ri->rp->up.kp.ainsn.insn + 0x1b);
833 tramp = (kprobe_opcode_t *)
834 (ri->rp->up.kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
/* Snapshot the top of the target stack in one atomic read. */
837 retval = read_proc_vm_atomic(task, (unsigned long)stack, buf, sizeof(buf));
838 if (retval != sizeof(buf)) {
839 printk("---> %s (%d/%d): failed to read stack from %08lx",
840 task->comm, task->tgid, task->pid, (unsigned long)stack);
845 /* search the stack from the bottom */
846 for (i = RETPROBE_STACK_DEPTH - 1; i >= 0; i--) {
847 if (buf[i] == tramp) {
854 printk("---> %s (%d/%d): trampoline found at %08lx (%08lx /%+d) - %p\n",
855 task->comm, task->tgid, task->pid,
856 (unsigned long)found, (unsigned long)sp,
857 found - sp, ri->rp->up.kp.addr);
/* Overwrite the trampoline slot with the saved return address. */
858 retval = write_proc_vm_atomic(task, (unsigned long)found, &ri->ret_addr,
859 sizeof(ri->ret_addr));
860 if (retval != sizeof(ri->ret_addr)) {
861 printk("---> %s (%d/%d): failed to write value to %08lx",
862 task->comm, task->tgid, task->pid, (unsigned long)found);
/* Not on the stack: check whether the hijacked address still sits in
 * the link register. */
868 struct pt_regs *uregs = task_pt_regs(ri->task);
869 unsigned long ra = dbi_get_ret_addr(uregs);
870 if (ra == (unsigned long)tramp) {
871 printk("---> %s (%d/%d): trampoline found at lr = %08lx - %p\n",
872 task->comm, task->tgid, task->pid, ra, ri->rp->up.kp.addr);
/* NOTE(review): writing tramp back into the return-address register
 * looks wrong - disarming should restore ri->ret_addr here; confirm
 * against upstream before relying on this path. */
873 dbi_set_ret_addr(uregs, (unsigned long)tramp);
876 printk("---> %s (%d/%d): trampoline NOT found at sp = %08lx, lr = %08lx - %p\n",
877 task->comm, task->tgid, task->pid,
878 (unsigned long)sp, ra, ri->rp->up.kp.addr);
887 /* Called with uretprobe_lock held */
/* Disarm every live uretprobe instance that @parent owns, patching
 * @task's memory (used e.g. when a traced task is being detached). */
888 int dbi_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
890 struct uretprobe_instance *ri;
891 struct hlist_node *node, *tmp;
892 struct hlist_head *head = uretprobe_inst_table_head(parent->mm);
894 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
895 if (parent == ri->task && ri->rp->up.kp.tgid) {
896 dbi_disarm_urp_inst(ri, task);
902 EXPORT_SYMBOL_GPL(dbi_disarm_urp_inst_for_task);
/*
 * Unregister a return probe: disarm and recycle every outstanding
 * instance under uretprobe_lock, drop the instruction-slot hash links
 * once no instance remains in flight, then remove the entry uprobe.
 */
904 void dbi_unregister_uretprobe(struct uretprobe *rp, int atomic)
907 struct uretprobe_instance *ri;
909 spin_lock_irqsave (&uretprobe_lock, flags);
911 while ((ri = get_used_urp_inst(rp)) != NULL) {
912 if (dbi_disarm_urp_inst(ri, NULL) != 0)
913 /*panic*/printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
914 ri->task->comm, ri->task->tgid, ri->task->pid,
915 (unsigned long)rp->up.kp.addr);
916 recycle_urp_inst(ri);
919 if (hlist_empty(&rp->used_instances)) {
920 struct kprobe *p = &rp->up.kp;
/* Unhook the slot-table links (guarded: probe may never have armed). */
922 if (!(hlist_unhashed(&p->is_hlist_arm))) {
923 hlist_del_rcu(&p->is_hlist_arm);
926 if (!(hlist_unhashed(&p->is_hlist_thumb))) {
927 hlist_del_rcu(&p->is_hlist_thumb);
929 #else /* CONFIG_ARM */
930 if (!(hlist_unhashed(&p->is_hlist))) {
931 hlist_del_rcu(&p->is_hlist);
933 #endif /* CONFIG_ARM */
/* Second pass: unlink anything still marked used. */
936 while ((ri = get_used_urp_inst(rp)) != NULL) {
938 hlist_del(&ri->uflist);
941 spin_unlock_irqrestore(&uretprobe_lock, flags);
944 dbi_unregister_uprobe(&rp->up, atomic);
/*
 * Tear down every probe belonging to @task's process: scan the whole
 * uprobe_table, match on tgid, and unregister each hit (logged).
 */
947 void dbi_unregister_all_uprobes(struct task_struct *task, int atomic)
949 struct hlist_head *head;
950 struct hlist_node *node, *tnode;
954 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
955 head = &uprobe_table[i];
956 hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
957 if (p->tgid == task->tgid) {
958 struct uprobe *up = container_of(p, struct uprobe, kp);
959 printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",
960 p->addr, (unsigned long)p->opcode, task->comm, task->pid);
961 dbi_unregister_uprobe(up, atomic);
/* Thin wrapper delegating to the arch-specific uprobe-return hook. */
967 void dbi_uprobe_return(void)
969 dbi_arch_uprobe_return();
/* Module init: set up the hash tables, then the arch layer. */
972 static int __init init_uprobes(void)
975 init_uprobes_insn_slots();
976 init_uretprobe_inst_table();
978 return swap_arch_init_uprobes();
/* Module exit: arch-layer teardown. */
981 static void __exit exit_uprobes(void)
983 swap_arch_exit_uprobes();
986 EXPORT_SYMBOL_GPL(dbi_uprobe_return);
987 EXPORT_SYMBOL_GPL(dbi_register_ujprobe);
988 EXPORT_SYMBOL_GPL(dbi_unregister_ujprobe);
989 EXPORT_SYMBOL_GPL(dbi_register_uretprobe);
990 EXPORT_SYMBOL_GPL(dbi_unregister_uretprobe);
991 EXPORT_SYMBOL_GPL(dbi_unregister_all_uprobes);
993 module_init(init_uprobes);
994 module_exit(exit_uprobes);
996 MODULE_LICENSE ("GPL");