2 * uprobe/swap_uprobes.c
3  * @author Alexey Gerenkov <a.gerenkov@samsung.com>: User-Space Probes initial
4  * implementation; support for x86/ARM/MIPS for both user and kernel spaces.
5 * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for
6 * separating core and arch parts
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * Copyright (C) Samsung Electronics, 2006-2010
28 * @section DESCRIPTION
30 * Uprobes implementation.
34 #include <linux/hash.h>
35 #include <linux/mempolicy.h>
36 #include <linux/module.h>
38 #include <master/swap_initializer.h>
39 #include <kprobe/swap_slots.h>
40 #include <kprobe/swap_kdebug.h>
41 #include <kprobe/swap_kprobes_deps.h>
43 #include <swap-asm/swap_uprobes.h>
45 #include "swap_uprobes.h"
49 UPROBE_HASH_BITS = 10,
50 UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
53 struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
54 struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
56 DEFINE_SPINLOCK(uretprobe_lock); /* Protects uretprobe_inst_table */
57 static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
59 #define DEBUG_PRINT_HASH_TABLE 0
61 #if DEBUG_PRINT_HASH_TABLE
/* Debug helper (compiled only when DEBUG_PRINT_HASH_TABLE is non-zero):
 * walks every bucket of uprobe_insn_slot_table and printk()s each probe
 * found via its is_hlist_arm linkage. */
62 void print_uprobe_hash_table(void)
65 	struct hlist_head *head;
67 	DECLARE_NODE_PTR_FOR_HLIST(node);
69 	/* print uprobe table */
70 	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
71 		head = &uprobe_insn_slot_table[i];
72 		swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
73 			printk(KERN_INFO "####### find U tgid=%u, addr=%x\n",
81 * Keep all fields in the uprobe consistent
/* Copy the probed-instruction state (original opcode, arch-decoded insn)
 * and the ARM/Thumb safety flags from old_p into p, so both kprobes
 * describe the same probe point consistently. */
83 static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
85 	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
86 	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
88 	p->safe_arm = old_p->safe_arm;
89 	p->safe_thumb = old_p->safe_thumb;
94 * Aggregate handlers for multiple uprobes support - these handlers
95 * take care of invoking the individual uprobe handlers on p->list
/* Aggregate pre-handler: invoke each chained probe's pre_handler in turn
 * (RCU list walk over p->list). */
97 static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
102 	list_for_each_entry_rcu(kp, &p->list, list) {
103 		if (kp->pre_handler) {
104 			ret = kp->pre_handler(kp, regs);
/* Aggregate post-handler: fan the event out to every chained probe that
 * registered a post_handler. */
113 static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs,
118 	list_for_each_entry_rcu(kp, &p->list, list) {
119 		if (kp->post_handler)
120 			kp->post_handler(kp, regs, flags);
/* Aggregate fault handler stub installed on the manager uprobe. */
124 static int aggr_fault_uhandler(struct kprobe *p,
125 				struct pt_regs *regs,
/* Aggregate break handler stub installed on the manager uprobe. */
131 static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
137 * Add the new probe to old_p->list. Fail if this is the
138 * second ujprobe at the address - two ujprobes can't coexist
/* Chain probe p onto the manager probe old_p's list.  A probe with a
 * break_handler is appended at the tail (and the aggregate break handler
 * installed); only one break_handler per address is allowed.  Installs
 * the aggregate post handler lazily on first post_handler user. */
140 static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
142 	if (p->break_handler) {
143 		if (old_p->break_handler)
146 		list_add_tail_rcu(&p->list, &old_p->list);
147 		old_p->break_handler = aggr_break_uhandler;
149 		list_add_rcu(&p->list, &old_p->list);
152 	if (p->post_handler && !old_p->post_handler)
153 		old_p->post_handler = aggr_post_uhandler;
159 * Fill in the required fields of the "manager uprobe". Replace the
160 * earlier uprobe in the hlist with the manager uprobe
/* Turn ap into the "manager" uprobe for p's address: install the
 * aggregate handlers, chain p onto ap's list, and atomically replace p
 * with ap in the hash bucket (RCU-safe). */
162 static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
167 	ap->pre_handler = aggr_pre_uhandler;
168 	ap->fault_handler = aggr_fault_uhandler;
171 	ap->post_handler = aggr_post_uhandler;
173 	if (p->break_handler)
174 		ap->break_handler = aggr_break_uhandler;
176 	INIT_LIST_HEAD(&ap->list);
177 	list_add_rcu(&p->list, &ap->list);
179 	hlist_replace_rcu(&p->hlist, &ap->hlist);
183 * This is the second or subsequent uprobe at the address - handle
/* Register a second (or later) probe at an address that already has one.
 * If old_p is already a manager probe (its pre_handler is the aggregate
 * one), just chain p onto it; otherwise allocate a fresh manager uprobe
 * (GFP_KERNEL), promote old_p under it, then chain p. */
186 static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
191 	if (old_p->pre_handler == aggr_pre_uhandler) {
192 		copy_uprobe(old_p, p);
193 		ret = add_new_uprobe(old_p, p);
195 		struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
199 		uap->task = kp2up(p)->task;
201 		add_aggr_uprobe(ap, old_p);
203 		ret = add_new_uprobe(ap, p);
/* Arm the probe: write the breakpoint instruction into the target task's
 * address space at p->kp.addr via write_proc_vm_atomic(); logs a failure
 * with the target tgid/address. */
209 static int arm_uprobe(struct uprobe *p)
211 	kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
212 	int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
213 				       &insn, sizeof(insn));
215 		printk("arm_uprobe: failed to write memory "
216 		       "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
225 * @brief Disarms uprobe.
227 * @param p Pointer to the uprobe's kprobe.
228 * @param task Pointer to the target task.
/* Disarm the probe: restore the saved original opcode into the target
 * task's memory at p->addr (inverse of arm_uprobe). */
231 void disarm_uprobe(struct kprobe *p, struct task_struct *task)
233 	int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
234 				       &p->opcode, sizeof(p->opcode));
236 		printk("disarm_uprobe: failed to write memory "
237 		       "tgid=%u, addr=%p!\n", task->tgid, p->addr);
240 EXPORT_SYMBOL_GPL(disarm_uprobe);
/* One-time initialization of the insn-slot hash table buckets. */
242 static void init_uprobes_insn_slots(void)
245 	for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
246 		INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
/* One-time initialization of the main uprobe hash table buckets. */
249 static void init_uprobe_table(void)
252 	for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
253 		INIT_HLIST_HEAD(&uprobe_table[i]);
/* One-time initialization of the uretprobe-instance hash table buckets. */
256 static void init_uretprobe_inst_table(void)
259 	for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
260 		INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
264 * @brief Gets uprobe's kprobe.
266 * @param addr Probe's address.
267  * @param tgid Probe's thread group ID.
268 * @return Pointer to the kprobe on success,\n
/* Look up the kprobe registered at (addr, tgid): hash the address into
 * uprobe_table and scan the bucket (RCU) for a matching address AND
 * owning thread group. */
271 struct kprobe *get_ukprobe(void *addr, pid_t tgid)
273 	struct hlist_head *head;
275 	DECLARE_NODE_PTR_FOR_HLIST(node);
277 	head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
278 	swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
279 		if (p->addr == addr && kp2up(p)->task->tgid == tgid)
287 * @brief Adds uprobe to hlist when trampoline have been made.
289 * @param p Pointer to the uprobe's kprobe.
/* Insert p into the insn-slot hash table, keyed by the address of its
 * out-of-line instruction copy (p->ainsn.insn), via the is_hlist node. */
292 void add_uprobe_table(struct kprobe *p)
294 	hlist_add_head_rcu(&p->is_hlist,
295 			   &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn,
300 * @brief Gets kprobe by insn slot.
302 * @param addr Probe's address.
303  * @param tgid Probe's thread group ID.
304 * @param regs Pointer to CPU registers data.
305 * @return Pointer to the kprobe on success,\n
/* Reverse lookup: given the address of an out-of-line instruction slot,
 * find the kprobe whose ainsn.insn matches it for the given tgid
 * (bucket scan of uprobe_insn_slot_table via is_hlist). */
308 struct kprobe *get_ukprobe_by_insn_slot(void *addr,
310 					struct pt_regs *regs)
312 	struct hlist_head *head;
314 	DECLARE_NODE_PTR_FOR_HLIST(node);
316 	/* TODO: test - two processes invokes instrumented function */
317 	head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
318 	swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
319 		if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid)
/* Release arch-specific resources (e.g. the insn slot) held by up. */
327 static void remove_uprobe(struct uprobe *up)
329 	arch_remove_uprobe(up);
/* Map a hash key (here: an mm pointer) to its uretprobe_inst_table bucket. */
332 static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
334 	return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
337 /* Called with uretprobe_lock held */
/* Move instance ri from its uretprobe's free list into active use:
 * hash it by ri->task->mm into uretprobe_inst_table and put it on the
 * uretprobe's used_instances list.  Caller holds uretprobe_lock. */
338 static void add_urp_inst(struct uretprobe_instance *ri)
341 	 * Remove rp inst off the free list -
342 	 * Add it back when probed function returns
344 	hlist_del(&ri->uflist);
346 	/* Add rp inst onto table */
347 	INIT_HLIST_NODE(&ri->hlist);
348 	hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));
350 	/* Also add this rp inst to the used list. */
351 	INIT_HLIST_NODE(&ri->uflist);
352 	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
355 /* Called with uretprobe_lock held */
/* Inverse of add_urp_inst: unhash ri and return it to the free list. */
356 static void recycle_urp_inst(struct uretprobe_instance *ri)
359 		hlist_del(&ri->hlist);
360 	/* remove rp inst off the used list */
361 	hlist_del(&ri->uflist);
362 	/* put rp inst back onto the free list */
363 	INIT_HLIST_NODE(&ri->uflist);
364 	hlist_add_head(&ri->uflist, &ri->rp->free_instances);
368 /* Called with uretprobe_lock held */
/* Return one instance from rp->used_instances (or NULL if empty).
 * Caller holds uretprobe_lock. */
369 static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
371 	struct uretprobe_instance *ri;
372 	DECLARE_NODE_PTR_FOR_HLIST(node);
374 	swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
382  * @brief Gets free uretprobe instance for the specified uretprobe without
383  * allocation. Called with uretprobe_lock held.
385  * @param rp Pointer to the uretprobe.
386  * @return Pointer to the uretprobe_instance on success,\n
389 struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
391 	struct uretprobe_instance *ri;
392 	DECLARE_NODE_PTR_FOR_HLIST(node);
394 	swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
401 /* Called with uretprobe_lock held */
/* Drain rp->free_instances, unlinking (and presumably freeing) each
 * pre-allocated instance. */
402 static void free_urp_inst(struct uretprobe *rp)
404 	struct uretprobe_instance *ri;
405 	while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
406 		hlist_del(&ri->uflist);
411 #define COMMON_URP_NR 10
/*
 * Grow rp->maxactive and pre-allocate COMMON_URP_NR additional
 * uretprobe instances onto rp->free_instances.  Allocation is
 * GFP_ATOMIC because the caller (get_free_urp_inst) holds
 * uretprobe_lock (a spinlock) when invoking us.
 */
413 static int alloc_nodes_uretprobe(struct uretprobe *rp)
416 	struct uretprobe_instance *inst;
419 #if 1 /* def CONFIG_PREEMPT */
420 	rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
	/* Fixed: was "rp->maxacpptive", a typo that would break the build
	 * if the #if above were ever switched back to CONFIG_PREEMPT. */
422 	rp->maxactive += NR_CPUS;
424 	alloc_nodes = COMMON_URP_NR;
426 	for (i = 0; i < alloc_nodes; ++i) {
427 		inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
432 		INIT_HLIST_NODE(&inst->uflist);
433 		hlist_add_head(&inst->uflist, &rp->free_instances);
439 /* Called with uretprobe_lock held */
/* Return a free instance from rp->free_instances; if the list is empty,
 * try alloc_nodes_uretprobe() to grow it and rescan.  Caller holds
 * uretprobe_lock. */
440 static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
442 	struct uretprobe_instance *ri;
443 	DECLARE_NODE_PTR_FOR_HLIST(node);
445 	swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
449 	if (!alloc_nodes_uretprobe(rp)) {
450 		swap_hlist_for_each_entry(ri, node,
451 					  &rp->free_instances, uflist) {
458 /* =================================================================== */
461 * @brief Registers uprobe.
463 * @param up Pointer to the uprobe to register.
464 * @return 0 on success,\n
465 * negative error code on error.
/* Register a uprobe: normalize the (possibly Thumb) address, prepare the
 * kprobe fields, detect an already-registered probe at the same
 * (addr, tgid) and chain onto it via register_aggr_uprobe(), otherwise
 * arch-prepare the probe, publish it in uprobe_table and arm it
 * (rolling back the hash insert if arming fails). */
467 int swap_register_uprobe(struct uprobe *up)
470 	struct kprobe *p, *old_p;
476 	DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
478 	/* thumb address = address-1; */
479 #if defined(CONFIG_ARM)
480 	/* TODO: must be corrected in 'bundle' */
481 	if ((unsigned long) p->addr & 0x01)
482 		p->addr = (kprobe_opcode_t *)((unsigned long)p->addr &
486 	p->ainsn.insn = NULL;
487 	p->mod_refcounted = 0;
489 	INIT_LIST_HEAD(&p->list);
490 #ifdef KPROBES_PROFILE
491 	p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
492 	p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
496 	/* get the first item */
497 	old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
499 		struct task_struct *task = up->task;
501 		/* TODO: add support many uprobes on address */
502 		printk(KERN_INFO "uprobe on task[%u %u %s] vaddr=%p is there\n",
503 		       task->tgid, task->pid, task->comm, p->addr);
507 		p->safe_arm = old_p->safe_arm;
508 		p->safe_thumb = old_p->safe_thumb;
510 		ret = register_aggr_uprobe(old_p, p);
		/* NOTE(review): DBPRINTF passes ret with no matching format
		 * specifier here and below — harmless but sloppy; confirm
		 * against the DBPRINTF macro definition. */
511 		DBPRINTF("goto out\n", ret);
515 	INIT_HLIST_NODE(&p->is_hlist);
517 	ret = arch_prepare_uprobe(up);
519 		DBPRINTF("goto out\n", ret);
523 	DBPRINTF("before out ret = 0x%x\n", ret);
525 	/* TODO: add uprobe (must be in function) */
526 	INIT_HLIST_NODE(&p->hlist);
527 	hlist_add_head_rcu(&p->hlist,
528 			   &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
530 	ret = arm_uprobe(up);
	/* arming failed: undo the hash-table publication */
532 		hlist_del_rcu(&p->hlist);
538 	DBPRINTF("out ret = 0x%x\n", ret);
541 EXPORT_SYMBOL_GPL(swap_register_uprobe);
544 * @brief Unregisters uprobe.
546 * @param up Pointer to the uprobe.
547 * @param disarm Disarm flag. When true uprobe is disarmed.
/* Unregister a uprobe.  Finds the registered probe for (addr, tgid); if p
 * is the only probe at the address (either stand-alone or the sole entry
 * under an aggregate manager) the probe is disarmed (when 'disarm' is set)
 * and removed from the hash table; otherwise p is merely unchained from
 * the manager's list and the aggregate break/post handlers are cleared
 * when no remaining probe needs them. */
550 void __swap_unregister_uprobe(struct uprobe *up, int disarm)
552 	struct kprobe *p, *old_p, *list_p;
556 	old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
557 	if (unlikely(!old_p))
561 	list_for_each_entry_rcu(list_p, &old_p->list, list) {
563 			/* uprobe p is a valid probe */
572 	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
573 	     (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
574 		/* Only probe on the hash list */
576 			disarm_uprobe(&up->kp, up->task);
578 		hlist_del_rcu(&old_p->hlist);
581 			list_del_rcu(&p->list);
587 		list_del_rcu(&p->list);
		/* p was chained under a manager: drop aggregate handlers that
		 * no longer have a backing probe */
596 	if (p->break_handler)
597 		old_p->break_handler = NULL;
599 	if (p->post_handler) {
600 		list_for_each_entry_rcu(list_p, &old_p->list, list) {
601 			if (list_p->post_handler) {
608 			old_p->post_handler = NULL;
612 EXPORT_SYMBOL_GPL(__swap_unregister_uprobe);
615 * @brief Unregisters uprobe. Main interface function, wrapper for
616 * __swap_unregister_uprobe.
618 * @param up Pointer to the uprobe.
/* Public wrapper: unregister and always disarm (disarm flag = 1). */
621 void swap_unregister_uprobe(struct uprobe *up)
623 	__swap_unregister_uprobe(up, 1);
627 * @brief Registers ujprobe.
629  * @param jp Pointer to the ujprobe.
630 * @return 0 on success,\n
631 * error code on error.
/* Register a jumper probe: install the setjmp/longjmp handler pair on the
 * embedded uprobe's kprobe, then register it as an ordinary uprobe. */
633 int swap_register_ujprobe(struct ujprobe *jp)
637 	/* Todo: Verify probepoint is a function entry point */
638 	jp->up.kp.pre_handler = setjmp_upre_handler;
639 	jp->up.kp.break_handler = longjmp_break_uhandler;
641 	ret = swap_register_uprobe(&jp->up);
645 EXPORT_SYMBOL_GPL(swap_register_ujprobe);
648 * @brief Unregisters ujprobe.
650 * @param jp Pointer to the ujprobe.
651 * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
/* Unregister a jumper probe: delegate to the uprobe path, then remove the
 * insn-slot hash linkage only if it was actually hashed (probes that never
 * installed are not on the hlist). */
654 void __swap_unregister_ujprobe(struct ujprobe *jp, int disarm)
656 	__swap_unregister_uprobe(&jp->up, disarm);
658 	 * Here is an attempt to unregister even those probes that have not been
659 	 * installed (hence not added to the hlist).
660 	 * So if we try to delete them from the hlist we will get NULL pointer
661 	 * dereference error. That is why we check whether this node
662 	 * really belongs to the hlist.
664 	if (!(hlist_unhashed(&jp->up.kp.is_hlist)))
665 		hlist_del_rcu(&jp->up.kp.is_hlist);
667 EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);
670 * @brief Unregisters ujprobe. Main interface function, wrapper for
671 * __swap_unregister_ujprobe.
673 * @param jp Pointer to the jprobe.
/* Public wrapper: unregister the ujprobe and always disarm. */
676 void swap_unregister_ujprobe(struct ujprobe *jp)
678 	__swap_unregister_ujprobe(jp, 1);
680 EXPORT_SYMBOL_GPL(swap_unregister_ujprobe);
683 * @brief Trampoline uprobe handler.
685 * @param p Pointer to the uprobe's kprobe.
686 * @param regs Pointer to CPU register data.
/* Return-trampoline hit handler: under uretprobe_lock, walk this task's
 * hashed uretprobe instances (newest first), run each instance's handler,
 * recycle it, and stop at the instance holding the real return address;
 * finally restore the original return address into regs (unless the
 * handler already did all the work and orig_ret_addr stayed 0). */
689 int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
691 	struct uretprobe_instance *ri = NULL;
693 	struct hlist_head *head;
694 	unsigned long flags, tramp_addr, orig_ret_addr = 0;
695 	struct hlist_node *tmp;
696 	DECLARE_NODE_PTR_FOR_HLIST(node);
698 	tramp_addr = arch_get_trampoline_addr(p, regs);
699 	spin_lock_irqsave(&uretprobe_lock, flags);
701 	head = uretprobe_inst_table_head(current->mm);
704 	 * It is possible to have multiple instances associated with a given
705 	 * task either because an multiple functions in the call path
706 	 * have a return probe installed on them, and/or more then one
707 	 * return probe was registered for a target function.
709 	 * We can handle this because:
710 	 * - instances are always inserted at the head of the list
711 	 * - when multiple return probes are registered for the same
712 	 *   function, the first instance's ret_addr will point to the
713 	 *   real return address, and all the rest will point to
714 	 *   uretprobe_trampoline
716 	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
717 		if (ri->task != current) {
718 			/* another task is sharing our hash bucket */
724 		kp = up2kp(&ri->rp->up);
727 			ri->rp->handler(ri, regs);
730 		orig_ret_addr = (unsigned long)ri->ret_addr;
731 		recycle_urp_inst(ri);
733 		if ((orig_ret_addr != tramp_addr && kp == p) || kp == NULL) {
735 			 * This is the real return address. Any other
736 			 * instances associated with this task are for
737 			 * other calls deeper on the call stack
743 	spin_unlock_irqrestore(&uretprobe_lock, flags);
744 	/* orig_ret_addr is NULL when there is no need to restore anything
745 	 * (all the magic is performed inside handler) */
746 	if (likely(orig_ret_addr))
747 		arch_set_orig_ret_addr(orig_ret_addr, regs);
/* Entry-side handler installed by swap_register_uretprobe: under
 * uretprobe_lock, grab a free instance, run the optional entry_handler,
 * then arch-prepare the return trampoline (swap the return address);
 * on arch failure the instance is recycled back to the free list. */
752 static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
754 	struct uprobe *up = container_of(p, struct uprobe, kp);
755 	struct uretprobe *rp = container_of(up, struct uretprobe, up);
	/* ARM: pick the mode-specific "no return" flag */
757 	int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
759 	struct uretprobe_instance *ri;
767 	/* TODO: consider to only swap the
768 	 * RA after the last pre_handler fired */
769 	spin_lock_irqsave(&uretprobe_lock, flags);
771 	/* TODO: test - remove retprobe after func entry but before its exit */
772 	ri = get_free_urp_inst(rp);
779 		ri->preload_thumb = 0;
782 		if (rp->entry_handler)
783 			rp->entry_handler(ri, regs);
785 		ret = arch_prepare_uretprobe(ri, regs);
788 			recycle_urp_inst(ri);
795 	spin_unlock_irqrestore(&uretprobe_lock, flags);
801 * @brief Registers uretprobe.
803 * @param rp Pointer to the uretprobe.
804 * @return 0 on success,\n
805 * negative error code on error.
/* Register a return probe: install pre_handler_uretprobe on the embedded
 * kprobe, size and pre-allocate the free-instance pool (each instance
 * carries rp->data_size extra bytes), register the entry uprobe, then run
 * the arch-level opcode analysis. */
807 int swap_register_uretprobe(struct uretprobe *rp)
810 	struct uretprobe_instance *inst;
814 	rp->up.kp.pre_handler = pre_handler_uretprobe;
815 	rp->up.kp.post_handler = NULL;
816 	rp->up.kp.fault_handler = NULL;
817 	rp->up.kp.break_handler = NULL;
819 	/* Pre-allocate memory for max kretprobe instances */
820 	if (rp->maxactive <= 0) {
821 #if 1 /* def CONFIG_PREEMPT */
		/* NOTE(review): magic 10 duplicates COMMON_URP_NR — consider
		 * reusing the named constant. */
822 		rp->maxactive = max(10, 2 * NR_CPUS);
824 		rp->maxactive = NR_CPUS;
828 	INIT_HLIST_HEAD(&rp->used_instances);
829 	INIT_HLIST_HEAD(&rp->free_instances);
831 	for (i = 0; i < rp->maxactive; i++) {
832 		inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
838 		INIT_HLIST_NODE(&inst->uflist);
839 		hlist_add_head(&inst->uflist, &rp->free_instances);
844 	/* Establish function entry probe point */
845 	ret = swap_register_uprobe(&rp->up);
849 	arch_opcode_analysis_uretprobe(rp);
853 EXPORT_SYMBOL_GPL(swap_register_uretprobe);
856 * @brief Disarms uretprobe instances for the specified child task.
858 * @param parent Pointer to the parent task struct.
859 * @param task Pointer to the child task struct.
/* For every pending uretprobe instance belonging to 'parent', disarm the
 * corresponding return address in the (child) 'task' — used so a forked
 * child does not return into the parent's trampoline. */
862 int swap_disarm_urp_inst_for_task(struct task_struct *parent,
863 				  struct task_struct *task)
866 	struct uretprobe_instance *ri;
867 	struct hlist_head *head;
868 	struct hlist_node *tmp;
869 	DECLARE_NODE_PTR_FOR_HLIST(node);
871 	spin_lock_irqsave(&uretprobe_lock, flags);
873 	head = uretprobe_inst_table_head(parent->mm);
874 	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
875 		if (parent == ri->task)
876 			arch_disarm_urp_inst(ri, task);
879 	spin_unlock_irqrestore(&uretprobe_lock, flags);
883 EXPORT_SYMBOL_GPL(swap_disarm_urp_inst_for_task);
886 * @brief Disarms uretprobes for specified task.
888 * @param task Pointer to the task_struct.
/* Discard every pending uretprobe instance owned by 'task': log it,
 * disarm its swapped return address, and recycle the instance.  Done
 * under uretprobe_lock. */
891 void swap_discard_pending_uretprobes(struct task_struct *task)
894 	struct uretprobe_instance *ri;
895 	struct hlist_head *head;
896 	struct hlist_node *tmp;
897 	DECLARE_NODE_PTR_FOR_HLIST(node);
899 	spin_lock_irqsave(&uretprobe_lock, flags);
901 	head = uretprobe_inst_table_head(task->mm);
902 	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
903 		if (ri->task == task) {
904 			printk(KERN_INFO "%s (%d/%d): pending urp inst: %08lx\n",
905 			       task->comm, task->tgid, task->pid,
906 			       (unsigned long)ri->rp->up.kp.addr);
907 			arch_disarm_urp_inst(ri, task);
908 			recycle_urp_inst(ri);
912 	spin_unlock_irqrestore(&uretprobe_lock, flags);
914 EXPORT_SYMBOL_GPL(swap_discard_pending_uretprobes);
917 * @brief Unregisters uretprobe.
919 * @param rp Pointer to the ureprobe.
920 * @param disarm Disarm flag, passed to __swap_unregister_uprobe
/* Unregister a return probe: remove the entry uprobe first, then under
 * uretprobe_lock disarm and recycle every still-used instance (logging
 * any instance that cannot be disarmed), drop the insn-slot hash linkage
 * once no used instances remain, and unlink anything left over. */
923 void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
926 	struct uretprobe_instance *ri;
928 	__swap_unregister_uprobe(&rp->up, disarm);
929 	spin_lock_irqsave(&uretprobe_lock, flags);
931 	while ((ri = get_used_urp_inst(rp)) != NULL) {
932 		if (arch_disarm_urp_inst(ri, ri->task) != 0)
933 			printk(KERN_INFO "%s (%d/%d): "
934 			       "cannot disarm urp instance (%08lx)\n",
935 			       ri->task->comm, ri->task->tgid, ri->task->pid,
936 			       (unsigned long)rp->up.kp.addr);
937 		recycle_urp_inst(ri);
940 	if (hlist_empty(&rp->used_instances)) {
941 		struct kprobe *p = &rp->up.kp;
		/* only unhash if the probe ever made it onto the hlist */
943 		if (!(hlist_unhashed(&p->is_hlist)))
944 			hlist_del_rcu(&p->is_hlist);
947 	while ((ri = get_used_urp_inst(rp)) != NULL) {
949 		hlist_del(&ri->uflist);
952 	spin_unlock_irqrestore(&uretprobe_lock, flags);
955 EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);
958  * @brief Unregisters uretprobe. Main interface function, wrapper for
959 * __swap_unregister_uretprobe.
961 * @param rp Pointer to the uretprobe.
/* Public wrapper: unregister the uretprobe and always disarm. */
964 void swap_unregister_uretprobe(struct uretprobe *rp)
966 	__swap_unregister_uretprobe(rp, 1);
968 EXPORT_SYMBOL_GPL(swap_unregister_uretprobe);
971 * @brief Unregisters all uprobes for task's thread group ID.
973 * @param task Pointer to the task_struct
/* Sweep every bucket of uprobe_table and unregister (with logging) each
 * probe whose owning thread group matches task->tgid.  Uses the _safe
 * iterator because swap_unregister_uprobe() unlinks entries mid-walk. */
976 void swap_unregister_all_uprobes(struct task_struct *task)
978 	struct hlist_head *head;
981 	struct hlist_node *tnode;
982 	DECLARE_NODE_PTR_FOR_HLIST(node);
984 	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
985 		head = &uprobe_table[i];
986 		swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
987 			if (kp2up(p)->task->tgid == task->tgid) {
989 					container_of(p, struct uprobe, kp);
990 				printk(KERN_INFO "%s: delete uprobe at %p[%lx]"
991 				       " for %s/%d\n", __func__, p->addr,
992 				       (unsigned long)p->opcode,
993 				       task->comm, task->pid);
994 				swap_unregister_uprobe(up);
999 EXPORT_SYMBOL_GPL(swap_unregister_all_uprobes);
1002 * @brief Arch-independent wrapper for arch_ujprobe_return.
/* Arch-independent wrapper around arch_ujprobe_return(). */
1006 void swap_ujprobe_return(void)
1008 	arch_ujprobe_return();
1010 EXPORT_SYMBOL_GPL(swap_ujprobe_return);
/* One-time module init hook (wired via SWAP_LIGHT_INIT_MODULE below):
 * initialize all three hash tables. */
1012 static int once(void)
1014 	init_uprobe_table();
1015 	init_uprobes_insn_slots();
1016 	init_uretprobe_inst_table();
1021 SWAP_LIGHT_INIT_MODULE(once, swap_arch_init_uprobes, swap_arch_exit_uprobes,
1024 MODULE_LICENSE("GPL");