2 * uprobe/swap_uprobes.c
3 * @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space Probes initial
4 * implementation; Support x86/ARM/MIPS for both user and kernel spaces.
5 * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for
6 * separating core and arch parts
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * Copyright (C) Samsung Electronics, 2006-2010
28 * @section DESCRIPTION
30 * Uprobes implementation.
34 #include <linux/hash.h>
35 #include <linux/mempolicy.h>
36 #include <linux/module.h>
38 #include <master/swap_initializer.h>
39 #include <kprobe/swap_slots.h>
40 #include <kprobe/swap_kdebug.h>
41 #include <kprobe/swap_kprobes_deps.h>
43 #include <swap-asm/swap_uprobes.h>
45 #include "swap_uprobes.h"
49 UPROBE_HASH_BITS = 10,
50 UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
53 static DEFINE_RWLOCK(st_lock);
54 static struct hlist_head slot_table[UPROBE_TABLE_SIZE];
55 struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
57 DEFINE_SPINLOCK(uretprobe_lock); /* Protects uretprobe_inst_table */
58 static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
60 #define DEBUG_PRINT_HASH_TABLE 0
62 #if DEBUG_PRINT_HASH_TABLE
/*
 * Debug helper: walk every bucket of the insn-slot hash table and printk
 * each probe found (compiled only when DEBUG_PRINT_HASH_TABLE is non-zero).
 * NOTE(review): this extract is missing lines — locals ('i', 'p'), the
 * printk arguments and the closing braces are not visible here.
 */
63 void print_uprobe_hash_table(void)
66 struct hlist_head *head;
68 DECLARE_NODE_PTR_FOR_HLIST(node);
70 /* print uprobe table */
71 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
72 head = &uprobe_insn_slot_table[i];
73 swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
74 printk(KERN_INFO "####### find U tgid=%u, addr=%x\n",
/*
 * Allocate (GFP_ATOMIC — callable from atomic context) and initialise a
 * struct uinst_info recording the original opcode at a user-space vaddr.
 * NOTE(review): lines missing from this extract — the NULL check guarding
 * the pr_err branch, the vaddr assignment and the return statement.
 */
82 struct uinst_info *uinst_info_create(unsigned long vaddr,
83 kprobe_opcode_t opcode)
85 struct uinst_info *uinst;
87 uinst = kmalloc(sizeof(*uinst), GFP_ATOMIC);
89 INIT_HLIST_NODE(&uinst->hlist);
91 uinst->opcode = opcode;
93 pr_err("Cannot allocate memory for uinst\n");
98 EXPORT_SYMBOL_GPL(uinst_info_create);
/*
 * Free a uinst_info previously created by uinst_info_create().
 * NOTE(review): the function body (presumably kfree) is missing from
 * this extract.
 */
100 void uinst_info_destroy(struct uinst_info *uinst)
104 EXPORT_SYMBOL_GPL(uinst_info_destroy);
/*
 * Restore the original opcode at uinst->vaddr in 'task's address space,
 * undoing the probe breakpoint, via write_proc_vm_atomic().
 * NOTE(review): the failure-check line guarding the printk is missing
 * here; also the printk lacks a KERN_* level — candidate cleanup.
 */
106 void uinst_info_disarm(struct uinst_info *uinst, struct task_struct *task)
108 int ret = write_proc_vm_atomic(task, uinst->vaddr,
109 &uinst->opcode, sizeof(uinst->opcode));
111 printk("uinst_info_disarm: failed to write memory "
112 "tgid=%u, vaddr=%08lx!\n", task->tgid, uinst->vaddr);
115 EXPORT_SYMBOL_GPL(uinst_info_disarm);
118 * Keep all fields in the uprobe consistent
/* Copy the decoded opcode and arch-specific insn state from old_p to p,
 * so an aggregated probe mirrors the probe already installed there. */
120 static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
122 memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
123 memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
127 * Aggregate handlers for multiple uprobes support - these handlers
128 * take care of invoking the individual uprobe handlers on p->list
/* Pre-handler for an aggregator kprobe: run each child pre_handler in
 * turn. NOTE(review): the lines declaring 'kp'/'ret' and deciding what
 * to do with a non-zero 'ret' are missing from this extract. */
130 static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
135 list_for_each_entry_rcu(kp, &p->list, list) {
136 if (kp->pre_handler) {
137 ret = kp->pre_handler(kp, regs);
/* Post-handler for an aggregator kprobe: invoke every child
 * post_handler registered on p->list. */
146 static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs,
151 list_for_each_entry_rcu(kp, &p->list, list) {
152 if (kp->post_handler)
153 kp->post_handler(kp, regs, flags);
/* Fault handler for an aggregator kprobe.
 * NOTE(review): body not visible in this extract. */
157 static int aggr_fault_uhandler(struct kprobe *p,
158 struct pt_regs *regs,
/* Break handler for an aggregator kprobe (ujprobe longjmp path).
 * NOTE(review): body not visible in this extract. */
164 static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
170 * Add the new probe to old_p->list. Fail if this is the
171 * second ujprobe at the address - two ujprobes can't coexist
/* A probe with a break_handler (a ujprobe) must be the only one of its
 * kind on this address; it is appended at the tail so the aggregator's
 * list order is preserved. NOTE(review): error-return and the else/
 * closing lines are missing from this extract. */
173 static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
175 if (p->break_handler) {
176 if (old_p->break_handler)
179 list_add_tail_rcu(&p->list, &old_p->list);
180 old_p->break_handler = aggr_break_uhandler;
182 list_add_rcu(&p->list, &old_p->list);
/* Promote the aggregator's post_handler the first time a child
 * provides one. */
185 if (p->post_handler && !old_p->post_handler)
186 old_p->post_handler = aggr_post_uhandler;
192 * Fill in the required fields of the "manager uprobe". Replace the
193 * earlier uprobe in the hlist with the manager uprobe
/* 'ap' becomes the aggregator: it takes over the hash-list slot of the
 * original probe 'p', dispatching to children via the aggr_* handlers. */
195 static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
200 ap->pre_handler = aggr_pre_uhandler;
201 ap->fault_handler = aggr_fault_uhandler;
204 ap->post_handler = aggr_post_uhandler;
206 if (p->break_handler)
207 ap->break_handler = aggr_break_uhandler;
/* Move the original probe onto the aggregator's child list and swap
 * it out of the hash table under RCU. */
209 INIT_LIST_HEAD(&ap->list);
210 list_add_rcu(&p->list, &ap->list);
212 hlist_replace_rcu(&p->hlist, &ap->hlist);
216 * This is the second or subsequent uprobe at the address - handle
/* If old_p is already an aggregator, just append; otherwise allocate a
 * fresh uprobe to act as aggregator and demote old_p to a child.
 * NOTE(review): the declaration binding 'ap' (presumably &uap->kp), the
 * kzalloc NULL check and the return are missing from this extract. */
219 static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
224 if (old_p->pre_handler == aggr_pre_uhandler) {
225 copy_uprobe(old_p, p);
226 ret = add_new_uprobe(old_p, p);
228 struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
232 uap->task = kp2up(p)->task;
234 add_aggr_uprobe(ap, old_p);
236 ret = add_new_uprobe(ap, p);
/* Thin wrapper: install the breakpoint for 'p' via the arch backend. */
242 static int arm_uprobe(struct uprobe *p)
244 return arch_arm_uprobe(p);
248 * @brief Disarms uprobe.
250 * @param p Pointer to the uprobe's kprobe.
251 * @param task Pointer to the target task.
/* Delegates to the arch layer to restore the original instruction in
 * the target task's address space. */
254 void disarm_uprobe(struct kprobe *p, struct task_struct *task)
256 arch_disarm_uprobe(p, task);
258 EXPORT_SYMBOL_GPL(disarm_uprobe);
/* Initialise every bucket of the insn-slot hash table. */
260 static void init_uprobes_insn_slots(void)
263 for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
264 INIT_HLIST_HEAD(&slot_table[i]);
/* Initialise every bucket of the main uprobe hash table. */
267 static void init_uprobe_table(void)
270 for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
271 INIT_HLIST_HEAD(&uprobe_table[i]);
/* Initialise every bucket of the uretprobe-instance hash table. */
274 static void init_uretprobe_inst_table(void)
277 for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
278 INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
282 * @brief Gets uprobe's kprobe.
284 * @param addr Probe's address.
285 * @param tgid Probe's thread group ID.
286 * @return Pointer to the kprobe on success,\n
/* Hash lookup keyed on probe address; a match also requires the owning
 * task's tgid, since different processes may probe the same vaddr.
 * NOTE(review): the return statements and closing braces are missing
 * from this extract. */
289 struct kprobe *get_ukprobe(void *addr, pid_t tgid)
291 struct hlist_head *head;
293 DECLARE_NODE_PTR_FOR_HLIST(node);
295 head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
296 swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
297 if (p->addr == addr && kp2up(p)->task->tgid == tgid)
305 * @brief Adds uprobe to hlist when trampoline have been made.
307 * @param p Pointer to the uprobe's kprobe.
/* Keyed by the insn-slot (trampoline) address, not the probe address,
 * so the slot table can map a trampoline hit back to its kprobe.
 * st_lock (rwlock) protects the slot table. */
310 void add_uprobe_table(struct kprobe *p)
312 write_lock(&st_lock);
313 hlist_add_head(&p->is_hlist,
314 &slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
315 write_unlock(&st_lock);
/* Remove 'p' from the slot table, tolerating probes that were never
 * added (hlist_unhashed check), under the st_lock write lock. */
318 static void del_uprobe_table(struct kprobe *p)
320 write_lock(&st_lock);
321 if (!hlist_unhashed(&p->is_hlist))
322 hlist_del(&p->is_hlist);
323 write_unlock(&st_lock);
327 * @brief Gets kprobe by insn slot.
329 * @param addr Probe's address.
330 * @param tgid Probe's thread group ID.
331 * @param regs Pointer to CPU registers data.
332 * @return Pointer to the kprobe on success,\n
/* Reverse lookup: given a trampoline (insn slot) address, find the
 * owning kprobe for the current tgid in the slot table.
 * NOTE(review): the matching read_lock(&st_lock) and the 'tgid'
 * parameter line are not visible in this extract — only the unlocks. */
335 struct kprobe *get_ukprobe_by_insn_slot(void *addr,
337 struct pt_regs *regs)
339 struct hlist_head *head;
341 DECLARE_NODE_PTR_FOR_HLIST(node);
344 head = &slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
345 swap_hlist_for_each_entry(p, node, head, is_hlist) {
346 if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
347 read_unlock(&st_lock);
351 read_unlock(&st_lock);
/* Tear down a probe: drop it from the slot table, then let the arch
 * layer release its insn slot / trampoline. */
357 static void remove_uprobe(struct uprobe *up)
359 del_uprobe_table(&up->kp);
360 arch_remove_uprobe(up);
/* Bucket selector for the uretprobe-instance table; callers key it by
 * the task's mm pointer so all instances of a process share a bucket. */
363 static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
365 return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
368 /* Called with uretprobe_lock held */
/* Move an instance from its uretprobe's free list onto (a) the global
 * per-mm instance table and (b) the uretprobe's used list. */
369 static void add_urp_inst(struct uretprobe_instance *ri)
372 * Remove rp inst off the free list -
373 * Add it back when probed function returns
375 hlist_del(&ri->uflist);
377 /* Add rp inst onto table */
378 INIT_HLIST_NODE(&ri->hlist);
379 hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));
381 /* Also add this rp inst to the used list. */
382 INIT_HLIST_NODE(&ri->uflist);
383 hlist_add_head(&ri->uflist, &ri->rp->used_instances);
386 /* Called with uretprobe_lock held */
/* Inverse of add_urp_inst(): unhook the instance from the global table
 * and the used list, and return it to its uretprobe's free list. */
387 static void recycle_urp_inst(struct uretprobe_instance *ri)
390 hlist_del(&ri->hlist);
391 /* remove rp inst off the used list */
392 hlist_del(&ri->uflist);
393 /* put rp inst back onto the free list */
394 INIT_HLIST_NODE(&ri->uflist);
395 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
399 /* Called with uretprobe_lock held */
/* Return the first in-flight (used) instance of 'rp', or NULL.
 * NOTE(review): the 'return ri;' / 'return NULL;' lines are missing
 * from this extract. */
400 static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
402 struct uretprobe_instance *ri;
403 DECLARE_NODE_PTR_FOR_HLIST(node);
405 swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
413 * @brief Gets a free uretprobe instance for the specified uretprobe without
414 * allocation. Called with uretprobe_lock held.
416 * @param rp Pointer to the uretprobe.
417 * @return Pointer to the uretprobe_instance on success,\n
/* Non-allocating fast path: first node of rp->free_instances, or NULL.
 * NOTE(review): return statements missing from this extract. */
420 struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
422 struct uretprobe_instance *ri;
423 DECLARE_NODE_PTR_FOR_HLIST(node);
425 swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
432 /* Called with uretprobe_lock held */
/* Drain and free every node on rp's free list.
 * NOTE(review): the kfree(ri) line is missing from this extract. */
433 static void free_urp_inst(struct uretprobe *rp)
435 struct uretprobe_instance *ri;
436 while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
437 hlist_del(&ri->uflist);
442 #define COMMON_URP_NR 10
444 static int alloc_nodes_uretprobe(struct uretprobe *rp)
447 struct uretprobe_instance *inst;
450 #if 1 /* def CONFIG_PREEMPT */
451 rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
453 rp->maxacpptive += NR_CPUS;
455 alloc_nodes = COMMON_URP_NR;
457 for (i = 0; i < alloc_nodes; ++i) {
458 inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
463 INIT_HLIST_NODE(&inst->uflist);
464 hlist_add_head(&inst->uflist, &rp->free_instances);
470 /* Called with uretprobe_lock held */
/* Like get_free_urp_inst_no_alloc(), but refills the pool via
 * alloc_nodes_uretprobe() when the free list is empty and retries.
 * NOTE(review): return statements and closing braces are missing from
 * this extract. */
471 static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
473 struct uretprobe_instance *ri;
474 DECLARE_NODE_PTR_FOR_HLIST(node);
476 swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
480 if (!alloc_nodes_uretprobe(rp)) {
481 swap_hlist_for_each_entry(ri, node,
482 &rp->free_instances, uflist) {
489 /* =================================================================== */
492 * @brief Registers uprobe.
494 * @param up Pointer to the uprobe to register.
495 * @return 0 on success,\n
496 * negative error code on error.
/* Registration sequence: reset per-probe state, check for an existing
 * probe at the same (addr, tgid) [currently aggregated registration is
 * reported, see TODO], prepare the arch insn slot, publish into the
 * hash table under RCU, then arm the breakpoint (rolling back the hash
 * insertion on arm failure).
 * NOTE(review): several lines are missing from this extract — the
 * 'ret' declaration, 'p = &up->kp' style binding, the if/goto glue and
 * the final return — so the exact control flow cannot be confirmed. */
498 int swap_register_uprobe(struct uprobe *up)
501 struct kprobe *p, *old_p;
507 p->ainsn.insn = NULL;
508 p->mod_refcounted = 0;
510 INIT_LIST_HEAD(&p->list);
511 #ifdef KPROBES_PROFILE
512 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
513 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
517 /* get the first item */
518 old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
520 struct task_struct *task = up->task;
522 /* TODO: add support many uprobes on address */
523 printk(KERN_INFO "uprobe on task[%u %u %s] vaddr=%p is there\n",
524 task->tgid, task->pid, task->comm, p->addr);
528 ret = register_aggr_uprobe(old_p, p);
529 DBPRINTF("goto out\n", ret);
533 INIT_HLIST_NODE(&p->is_hlist);
535 ret = arch_prepare_uprobe(up);
537 DBPRINTF("goto out\n", ret);
541 DBPRINTF("before out ret = 0x%x\n", ret);
543 /* TODO: add uprobe (must be in function) */
544 INIT_HLIST_NODE(&p->hlist);
545 hlist_add_head_rcu(&p->hlist,
546 &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
548 ret = arm_uprobe(up);
/* Arm failed: unpublish the probe so lookups cannot find it. */
550 hlist_del_rcu(&p->hlist);
556 DBPRINTF("out ret = 0x%x\n", ret);
559 EXPORT_SYMBOL_GPL(swap_register_uprobe);
562 * @brief Unregisters uprobe.
564 * @param up Pointer to the uprobe.
565 * @param disarm Disarm flag. When true uprobe is disarmed.
/* Two cases: (1) 'p' is the only probe at the address (or the last
 * child of an aggregator) — optionally disarm, unhash and remove it;
 * (2) other probes remain — just unlink 'p' from the aggregator's list
 * and clear the aggregator's break/post handlers if no remaining child
 * provides them.
 * NOTE(review): many glue lines (p binding, goto labels, valid_p
 * checks, returns) are missing from this extract. */
568 void __swap_unregister_uprobe(struct uprobe *up, int disarm)
570 struct kprobe *p, *old_p, *list_p;
574 old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
575 if (unlikely(!old_p))
579 list_for_each_entry_rcu(list_p, &old_p->list, list) {
581 /* uprobe p is a valid probe */
590 if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
591 (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
592 /* Only probe on the hash list */
594 disarm_uprobe(&up->kp, up->task);
596 hlist_del_rcu(&old_p->hlist);
599 list_del_rcu(&p->list);
605 list_del_rcu(&p->list);
614 if (p->break_handler)
615 old_p->break_handler = NULL;
617 if (p->post_handler) {
618 list_for_each_entry_rcu(list_p, &old_p->list, list) {
619 if (list_p->post_handler) {
/* No remaining child has a post_handler — drop the aggregator's. */
626 old_p->post_handler = NULL;
630 EXPORT_SYMBOL_GPL(__swap_unregister_uprobe);
633 * @brief Unregisters uprobe. Main interface function, wrapper for
634 * __swap_unregister_uprobe.
636 * @param up Pointer to the uprobe.
/* Convenience wrapper: always disarm (disarm=1). */
639 void swap_unregister_uprobe(struct uprobe *up)
641 __swap_unregister_uprobe(up, 1);
645 * @brief Registers ujprobe.
647 * @param jp Pointer to the ujprobe function.
648 * @return 0 on success,\n
649 * error code on error.
/* A ujprobe is a uprobe whose pre-handler performs the jprobe-style
 * setjmp, and whose break handler performs the longjmp back. */
651 int swap_register_ujprobe(struct ujprobe *jp)
655 /* Todo: Verify probepoint is a function entry point */
656 jp->up.kp.pre_handler = setjmp_upre_handler;
657 jp->up.kp.break_handler = longjmp_break_uhandler;
659 ret = swap_register_uprobe(&jp->up);
663 EXPORT_SYMBOL_GPL(swap_register_ujprobe);
666 * @brief Unregisters ujprobe.
668 * @param jp Pointer to the ujprobe.
669 * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
/* Thin forwarding wrapper over the generic uprobe unregister path. */
672 void __swap_unregister_ujprobe(struct ujprobe *jp, int disarm)
674 __swap_unregister_uprobe(&jp->up, disarm);
676 EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);
679 * @brief Unregisters ujprobe. Main interface function, wrapper for
680 * __swap_unregister_ujprobe.
682 * @param jp Pointer to the jprobe.
/* Convenience wrapper: always disarm (disarm=1). */
685 void swap_unregister_ujprobe(struct ujprobe *jp)
687 __swap_unregister_ujprobe(jp, 1);
689 EXPORT_SYMBOL_GPL(swap_unregister_ujprobe);
692 * @brief Trampoline uprobe handler.
694 * @param p Pointer to the uprobe's kprobe.
695 * @param regs Pointer to CPU register data.
/* Runs when a probed function returns through the uretprobe trampoline:
 * walk this mm's instance bucket, fire each matching instance's
 * handler, recycle it, and stop once the real return address for this
 * probe is found; finally restore the original return address.
 * NOTE(review): some lines (the 'kp' declaration, handler-guard
 * conditions, loop break, final return) are missing from this
 * extract. */
698 int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
700 struct uretprobe_instance *ri = NULL;
702 struct hlist_head *head;
703 unsigned long flags, tramp_addr, orig_ret_addr = 0;
704 struct hlist_node *tmp;
705 DECLARE_NODE_PTR_FOR_HLIST(node);
707 tramp_addr = arch_get_trampoline_addr(p, regs);
708 spin_lock_irqsave(&uretprobe_lock, flags);
710 head = uretprobe_inst_table_head(current->mm);
713 * It is possible to have multiple instances associated with a given
714 * task either because an multiple functions in the call path
715 * have a return probe installed on them, and/or more then one
716 * return probe was registered for a target function.
718 * We can handle this because:
719 * - instances are always inserted at the head of the list
720 * - when multiple return probes are registered for the same
721 * function, the first instance's ret_addr will point to the
722 * real return address, and all the rest will point to
723 * uretprobe_trampoline
725 swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
726 if (ri->task != current) {
727 /* another task is sharing our hash bucket */
733 kp = up2kp(&ri->rp->up);
736 ri->rp->handler(ri, regs);
739 orig_ret_addr = (unsigned long)ri->ret_addr;
740 recycle_urp_inst(ri);
742 if ((orig_ret_addr != tramp_addr && kp == p) || kp == NULL) {
744 * This is the real return address. Any other
745 * instances associated with this task are for
746 * other calls deeper on the call stack
752 spin_unlock_irqrestore(&uretprobe_lock, flags);
753 /* orig_ret_addr is NULL when there is no need to restore anything
754 * (all the magic is performed inside handler) */
755 if (likely(orig_ret_addr))
756 arch_set_orig_ret_addr(orig_ret_addr, regs);
/* Entry-side handler of a uretprobe: grab a free instance, run the
 * user's entry_handler, then let the arch code swap the return address
 * for the trampoline; on arch failure the instance is recycled.
 * NOTE(review): lines missing from this extract — 'flags'/'ret'
 * declarations, the noret early-exit, ri field setup, add_urp_inst()
 * call and the return — so the exact flow is partly inferred. */
761 static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
763 struct uprobe *up = container_of(p, struct uprobe, kp);
764 struct uretprobe *rp = container_of(up, struct uretprobe, up);
766 int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
768 struct uretprobe_instance *ri;
776 /* TODO: consider to only swap the
777 * RA after the last pre_handler fired */
778 spin_lock_irqsave(&uretprobe_lock, flags);
780 /* TODO: test - remove retprobe after func entry but before its exit */
781 ri = get_free_urp_inst(rp);
788 ri->preload_thumb = 0;
791 if (rp->entry_handler)
792 rp->entry_handler(ri, regs);
794 ret = arch_prepare_uretprobe(ri, regs);
/* Arch could not hijack the return address — put the instance back. */
797 recycle_urp_inst(ri);
804 spin_unlock_irqrestore(&uretprobe_lock, flags);
810 * @brief Registers uretprobe.
812 * @param rp Pointer to the uretprobe.
813 * @return 0 on success,\n
814 * negative error code on error.
/* Set pre_handler_uretprobe as the entry probe, pre-allocate
 * 'maxactive' instances onto the free list (defaulting maxactive from
 * CPU count), register the underlying uprobe, then run the arch opcode
 * analysis. NOTE(review): error-handling lines (kmalloc NULL check
 * with free_urp_inst rollback, failure path after register) are
 * missing from this extract. */
816 int swap_register_uretprobe(struct uretprobe *rp)
819 struct uretprobe_instance *inst;
823 rp->up.kp.pre_handler = pre_handler_uretprobe;
824 rp->up.kp.post_handler = NULL;
825 rp->up.kp.fault_handler = NULL;
826 rp->up.kp.break_handler = NULL;
828 /* Pre-allocate memory for max kretprobe instances */
829 if (rp->maxactive <= 0) {
830 #if 1 /* def CONFIG_PREEMPT */
831 rp->maxactive = max(10, 2 * NR_CPUS);
833 rp->maxactive = NR_CPUS;
837 INIT_HLIST_HEAD(&rp->used_instances);
838 INIT_HLIST_HEAD(&rp->free_instances);
840 for (i = 0; i < rp->maxactive; i++) {
841 inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_KERNEL);
847 INIT_HLIST_NODE(&inst->uflist);
848 hlist_add_head(&inst->uflist, &rp->free_instances);
853 /* Establish function entry probe point */
854 ret = swap_register_uprobe(&rp->up);
858 arch_opcode_analysis_uretprobe(rp);
862 EXPORT_SYMBOL_GPL(swap_register_uretprobe);
865 * @brief Unregisters uretprobe.
867 * @param rp Pointer to the uretprobe.
868 * @param disarm Disarm flag, passed to __swap_unregister_uprobe
/* After dropping the entry probe, disarm every in-flight instance
 * (dropping uretprobe_lock around the disarm, which may touch another
 * task's memory), recycle them, then free the instance pools.
 * NOTE(review): the 'flags' declaration, the is_current special case
 * body, kfree of used instances and free_urp_inst(rp) call are missing
 * from this extract. */
871 void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
874 struct uretprobe_instance *ri;
876 __swap_unregister_uprobe(&rp->up, disarm);
878 spin_lock_irqsave(&uretprobe_lock, flags);
879 while ((ri = get_used_urp_inst(rp)) != NULL) {
880 bool is_current = ri->task == current;
/* Can't call into another task's address space with the spinlock
 * held / IRQs off — release it around the disarm. */
883 spin_unlock_irqrestore(&uretprobe_lock, flags);
885 /* FIXME: arch_disarm_urp_inst() for no current context */
886 if (arch_disarm_urp_inst(ri, ri->task, 0) != 0)
887 printk(KERN_INFO "%s (%d/%d): "
888 "cannot disarm urp instance (%08lx)\n",
889 ri->task->comm, ri->task->tgid, ri->task->pid,
890 (unsigned long)rp->up.kp.addr);
893 spin_lock_irqsave(&uretprobe_lock, flags);
895 recycle_urp_inst(ri);
897 while ((ri = get_used_urp_inst(rp)) != NULL) {
899 hlist_del(&ri->uflist);
901 spin_unlock_irqrestore(&uretprobe_lock, flags);
905 EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);
908 * @brief Unregisters uretprobe. Main interface function, wrapper for
909 * __swap_unregister_uretprobe.
911 * @param rp Pointer to the uretprobe.
/* Convenience wrapper: always disarm (disarm=1). */
914 void swap_unregister_uretprobe(struct uretprobe *rp)
916 __swap_unregister_uretprobe(rp, 1);
918 EXPORT_SYMBOL_GPL(swap_unregister_uretprobe);
921 * @brief Unregisters all uprobes for task's thread group ID.
923 * @param task Pointer to the task_struct
/* Scan every hash bucket (safe iteration — entries are removed while
 * walking) and unregister each probe owned by task's tgid, logging
 * each removal. NOTE(review): the 'struct uprobe *up =' line preceding
 * the container_of and closing braces are missing from this extract. */
926 void swap_unregister_all_uprobes(struct task_struct *task)
928 struct hlist_head *head;
931 struct hlist_node *tnode;
932 DECLARE_NODE_PTR_FOR_HLIST(node);
934 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
935 head = &uprobe_table[i];
936 swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
937 if (kp2up(p)->task->tgid == task->tgid) {
939 container_of(p, struct uprobe, kp);
940 printk(KERN_INFO "%s: delete uprobe at %p[%lx]"
941 " for %s/%d\n", __func__, p->addr,
942 (unsigned long)p->opcode,
943 task->comm, task->pid);
944 swap_unregister_uprobe(up);
949 EXPORT_SYMBOL_GPL(swap_unregister_all_uprobes);
952 * @brief Arch-independent wrapper for arch_ujprobe_return.
/* Called by ujprobe handlers to return to the original code path. */
956 void swap_ujprobe_return(void)
958 arch_ujprobe_return();
960 EXPORT_SYMBOL_GPL(swap_ujprobe_return);
/* Snapshot an in-flight uretprobe instance into a standalone
 * urinst_info (GFP_ATOMIC), capturing task, sp, trampoline and return
 * address so it can be disarmed later without the original instance.
 * NOTE(review): the NULL check guarding pr_err and the return
 * statement are missing from this extract. */
963 static struct urinst_info *urinst_info_create(struct uretprobe_instance *ri)
965 struct urinst_info *urinst;
967 urinst = kmalloc(sizeof(*urinst), GFP_ATOMIC);
969 INIT_HLIST_NODE(&urinst->hlist);
970 urinst->task = ri->task;
971 urinst->sp = (unsigned long)ri->sp;
972 urinst->tramp = arch_tramp_by_ri(ri);
973 urinst->ret_addr = (unsigned long)ri->ret_addr;
975 pr_err("Cannot allocate memory for urinst\n");
/* Free a urinst_info created by urinst_info_create().
 * NOTE(review): body (presumably kfree) not visible in this extract. */
981 static void urinst_info_destroy(struct urinst_info *urinst)
/* Rebuild a temporary uretprobe_instance from the saved urinst fields
 * and hand it to the arch layer to restore the original return address
 * in 'task'. */
986 static void urinst_info_disarm(struct urinst_info *urinst, struct task_struct *task)
988 struct uretprobe_instance ri;
989 unsigned long tramp = urinst->tramp;
991 /* set necessary data*/
992 ri.task = urinst->task;
993 ri.sp = (kprobe_opcode_t *)urinst->sp;
994 ri.ret_addr = (kprobe_opcode_t *)urinst->ret_addr;
996 arch_disarm_urp_inst(&ri, task, tramp);
/* Snapshot all in-flight uretprobe instances belonging to the current
 * task into 'head' (appending after 'last' to preserve stack order),
 * optionally recycling each original instance ('recycle' flag).
 * NOTE(review): the NULL check on urinst_info_create(), the if/else
 * around hlist_add_after vs hlist_add_head, and the recycle guard are
 * partially missing from this extract. */
999 void urinst_info_get_current_hlist(struct hlist_head *head, bool recycle)
1001 unsigned long flags;
1002 struct task_struct *task = current;
1003 struct uretprobe_instance *ri;
1004 struct hlist_head *hhead;
1005 struct hlist_node *n;
1006 struct hlist_node *last = NULL;
1007 DECLARE_NODE_PTR_FOR_HLIST(node);
1009 spin_lock_irqsave(&uretprobe_lock, flags);
1010 hhead = uretprobe_inst_table_head(task->mm);
1011 swap_hlist_for_each_entry_safe(ri, node, n, hhead, hlist) {
1012 if (task == ri->task) {
1013 struct urinst_info *urinst;
1015 urinst = urinst_info_create(ri);
1018 hlist_add_after(last, &urinst->hlist);
1020 hlist_add_head(&urinst->hlist, head);
1022 last = &urinst->hlist;
1026 recycle_urp_inst(ri);
1029 spin_unlock_irqrestore(&uretprobe_lock, flags);
1031 EXPORT_SYMBOL_GPL(urinst_info_get_current_hlist);
/* Consume a list built by urinst_info_get_current_hlist(): disarm each
 * saved instance in 'task' (the disarm guard condition is not visible
 * in this extract), then unlink and free the node. */
1033 void urinst_info_put_current_hlist(struct hlist_head *head,
1034 struct task_struct *task)
1036 struct urinst_info *urinst;
1037 struct hlist_node *tmp;
1038 DECLARE_NODE_PTR_FOR_HLIST(node);
1040 swap_hlist_for_each_entry_safe(urinst, node, tmp, head, hlist) {
1041 /* check on disarm */
1043 urinst_info_disarm(urinst, task);
1045 hlist_del(&urinst->hlist);
1046 urinst_info_destroy(urinst);
1049 EXPORT_SYMBOL_GPL(urinst_info_put_current_hlist);
/* One-time module init hook (see SWAP_LIGHT_INIT_MODULE below):
 * initialise the three hash tables before any probe is registered. */
1052 static int once(void)
1054 init_uprobe_table();
1055 init_uprobes_insn_slots();
1056 init_uretprobe_inst_table();
1061 SWAP_LIGHT_INIT_MODULE(once, swap_arch_init_uprobes, swap_arch_exit_uprobes,
1064 MODULE_LICENSE("GPL");