2 * uprobe/swap_uprobes.c
3 * @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space Probes initial
4 * implementation; Support x86/ARM/MIPS for both user and kernel spaces.
5 * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for
6 * separating core and arch parts
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * Copyright (C) Samsung Electronics, 2006-2010
28 * @section DESCRIPTION
30 * Uprobes implementation.
34 #include <linux/hash.h>
35 #include <linux/mempolicy.h>
36 #include <linux/module.h>
38 #include <master/swap_initializer.h>
39 #include <kprobe/swap_slots.h>
40 #include <kprobe/swap_kdebug.h>
41 #include <kprobe/swap_kprobes_deps.h>
43 #include <swap-asm/swap_uprobes.h>
45 #include "swap_uprobes.h"
	/* Hash-table sizing: 2^10 = 1024 buckets for all tables below. */
	UPROBE_HASH_BITS = 10,
	UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)

/* Protects slot_table (see add_uprobe_table/del_uprobe_table). */
static DEFINE_RWLOCK(st_lock);
/* uprobes hashed by instruction-slot address (ainsn.insn). */
static struct hlist_head slot_table[UPROBE_TABLE_SIZE];
/* uprobes hashed by probed address; non-static — shared with arch code. */
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];

DEFINE_SPINLOCK(uretprobe_lock);	/* Protects uretprobe_inst_table */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];

/* Set to 1 to compile in the print_uprobe_hash_table() debug helper. */
#define DEBUG_PRINT_HASH_TABLE 0
#if DEBUG_PRINT_HASH_TABLE
/* Debug-only helper: dump every registered uprobe (tgid + address). */
void print_uprobe_hash_table(void)
	struct hlist_head *head;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* print uprobe table */
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
		/* NOTE(review): 'uprobe_insn_slot_table' is not declared in
		 * this file (the tables here are 'slot_table' and
		 * 'uprobe_table'); this dead debug code likely will not build
		 * when DEBUG_PRINT_HASH_TABLE is enabled — verify. */
		head = &uprobe_insn_slot_table[i];
		swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
			printk(KERN_INFO "####### find U tgid=%u, addr=0x%lx\n",
			       p->task->tgid, (unsigned long)p->addr);
/**
 * @brief Allocates and initializes a uinst_info record for a probed
 * instruction (virtual address + original opcode).
 *
 * @param vaddr Virtual address of the instruction.
 * @param opcode Original opcode saved for later restore (disarm).
 * @return Pointer to the new uinst_info, or NULL on allocation failure
 *         (presumably — the failure branch is reported via pr_err below).
 */
struct uinst_info *uinst_info_create(unsigned long vaddr,
				     kprobe_opcode_t opcode)
	struct uinst_info *uinst;

	/* GFP_ATOMIC: this may be called from non-sleepable context */
	uinst = kmalloc(sizeof(*uinst), GFP_ATOMIC);
	INIT_HLIST_NODE(&uinst->hlist);
	uinst->opcode = opcode;
	/* allocation-failure path */
	pr_err("Cannot allocate memory for uinst\n");
EXPORT_SYMBOL_GPL(uinst_info_create);
/* Frees a uinst_info previously created by uinst_info_create(). */
void uinst_info_destroy(struct uinst_info *uinst)
EXPORT_SYMBOL_GPL(uinst_info_destroy);

/*
 * Restores the original opcode into the target task's memory, undoing the
 * breakpoint previously planted at uinst->vaddr.
 */
void uinst_info_disarm(struct uinst_info *uinst, struct task_struct *task)
	int ret = write_proc_vm_atomic(task, uinst->vaddr,
				       &uinst->opcode, sizeof(uinst->opcode));
	/* NOTE(review): printk without KERN_ level — consider KERN_WARNING */
	printk("uinst_info_disarm: failed to write memory "
	       "tgid=%u, vaddr=%08lx!\n", task->tgid, uinst->vaddr);
EXPORT_SYMBOL_GPL(uinst_info_disarm);
 * Keep all fields in the uprobe consistent
 */
/* Copies the saved opcode and decoded-instruction info from old_p to p. */
static inline void copy_uprobe(struct uprobe *old_p, struct uprobe *p)
	memcpy(&p->opcode, &old_p->opcode, sizeof(uprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_insn));
 * Aggregate handlers for multiple uprobes support - these handlers
 * take care of invoking the individual uprobe handlers on p->list
 */
/* Calls every child pre_handler registered on the aggregate probe. */
static int aggr_pre_uhandler(struct uprobe *p, struct pt_regs *regs)
	list_for_each_entry_rcu(up, &p->list, list) {
		if (up->pre_handler) {
			ret = up->pre_handler(up, regs);

/* Calls every child post_handler registered on the aggregate probe. */
static void aggr_post_uhandler(struct uprobe *p, struct pt_regs *regs,
	list_for_each_entry_rcu(up, &p->list, list) {
		if (up->post_handler)
			up->post_handler(up, regs, flags);

/* Aggregate fault handler stub. */
static int aggr_fault_uhandler(struct uprobe *p,
			       struct pt_regs *regs,
/* Aggregate break handler stub. */
static int aggr_break_uhandler(struct uprobe *p, struct pt_regs *regs)
 * Add the new probe to old_p->list. Fail if this is the
 * second ujprobe at the address - two ujprobes can't coexist
 */
static int add_new_uprobe(struct uprobe *old_p, struct uprobe *p)
	if (p->break_handler) {
		/* only one break_handler (i.e. one ujprobe) per address */
		if (old_p->break_handler)
		/* break-handler probe must fire last: append at tail */
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_uhandler;
		list_add_rcu(&p->list, &old_p->list);

	/* install aggregate post handler on first probe that needs one */
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_uhandler;
 * Fill in the required fields of the "manager uprobe". Replace the
 * earlier uprobe in the hlist with the manager uprobe
 */
static inline void add_aggr_uprobe(struct uprobe *ap, struct uprobe *p)
	/* route all events through the aggregate dispatchers */
	ap->pre_handler = aggr_pre_uhandler;
	ap->fault_handler = aggr_fault_uhandler;
	ap->post_handler = aggr_post_uhandler;

	if (p->break_handler)
		ap->break_handler = aggr_break_uhandler;

	/* move the original probe onto the manager's child list */
	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	/* the manager takes the original's place in the hash table */
	hlist_replace_rcu(&p->hlist, &ap->hlist);
 * This is the second or subsequent uprobe at the address - handle
 */
static int register_aggr_uprobe(struct uprobe *old_p, struct uprobe *p)
	if (old_p->pre_handler == aggr_pre_uhandler) {
		/* already an aggregate probe: just append the new one */
		copy_uprobe(old_p, p);
		ret = add_new_uprobe(old_p, p);
		/* first collision: build a manager probe around old_p */
		struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
		add_aggr_uprobe(uap, old_p);
		ret = add_new_uprobe(uap, p);
/* Plants the breakpoint: delegates to the arch-specific implementation. */
static int arm_uprobe(struct uprobe *p)
	return arch_arm_uprobe(p);

/**
 * @brief Disarms uprobe.
 *
 * @param p Pointer to the uprobe.
 * @param task Pointer to the target task.
 */
void disarm_uprobe(struct uprobe *p, struct task_struct *task)
	arch_disarm_uprobe(p, task);
EXPORT_SYMBOL_GPL(disarm_uprobe);
/* Initializes every bucket of the instruction-slot hash table. */
static void init_uprobes_insn_slots(void)
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
		INIT_HLIST_HEAD(&slot_table[i]);

/* Initializes every bucket of the uprobe-by-address hash table. */
static void init_uprobe_table(void)
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
		INIT_HLIST_HEAD(&uprobe_table[i]);

/* Initializes every bucket of the uretprobe-instance hash table. */
static void init_uretprobe_inst_table(void)
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
		INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
 * @brief Gets uprobe.
 *
 * @param addr Probe's address.
 * @param tgid Probe's thread group ID.
 * @return Pointer to the uprobe on success,\n
 */
struct uprobe *get_uprobe(void *addr, pid_t tgid)
	struct hlist_head *head;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* look up by (address, tgid): probes are per-process */
	head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
	swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr && p->task->tgid == tgid)
 * @brief Adds uprobe to hlist when trampoline have been made.
 *
 * @param p Pointer to the uprobe.
 */
void add_uprobe_table(struct uprobe *p)
	/* index by the instruction-slot address, under the slot-table lock */
	write_lock(&st_lock);
	hlist_add_head(&p->is_hlist,
		       &slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
	write_unlock(&st_lock);

/* Removes a uprobe from the instruction-slot table (if it was hashed). */
static void del_uprobe_table(struct uprobe *p)
	write_lock(&st_lock);
	if (!hlist_unhashed(&p->is_hlist))
		hlist_del(&p->is_hlist);
	write_unlock(&st_lock);
 * @brief Gets uprobe by insn slot.
 *
 * @param addr Probe's address.
 * @param tgid Probe's thread group ID.
 * @param regs Pointer to CPU registers data.
 * @return Pointer to the uprobe on success,\n
 */
struct uprobe *get_uprobe_by_insn_slot(void *addr,
				       struct pt_regs *regs)
	struct hlist_head *head;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* lookup is done under st_lock (read side); released on both the
	 * found and not-found paths below */
	head = &slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
	swap_hlist_for_each_entry(p, node, head, is_hlist) {
		if (p->ainsn.insn == addr && p->task->tgid == tgid) {
			read_unlock(&st_lock);
	read_unlock(&st_lock);
/* Unhashes the probe from the slot table and frees its arch resources. */
static void remove_uprobe(struct uprobe *up)
	del_uprobe_table(up);
	arch_remove_uprobe(up);

/* Returns the uretprobe-instance bucket for a hash key (the task's mm). */
static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
	return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
/* Called with uretprobe_lock held */
/* Moves a uretprobe instance from the free list to the active sets. */
static void add_urp_inst(struct uretprobe_instance *ri)
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	/* hashed by the task's mm so all instances of a process share bucket */
	hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);

/* Called with uretprobe_lock held */
/* Returns a fired uretprobe instance back to its probe's free list. */
static void recycle_urp_inst(struct uretprobe_instance *ri)
	hlist_del(&ri->hlist);
	/* remove rp inst off the used list */
	hlist_del(&ri->uflist);
	/* put rp inst back onto the free list */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->free_instances);
/* Called with uretprobe_lock held */
/* Returns the first instance on rp's used list (presumably NULL if empty). */
static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
	struct uretprobe_instance *ri;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {

/**
 * @brief Gets free uretprobe instance for the specified uretprobe without
 * allocation. Called with uretprobe_lock held.
 *
 * @param rp Pointer to the uretprobe.
 * @return Pointer to the uretprobe_instance on success,\n
 */
struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
	struct uretprobe_instance *ri;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
/* Called with uretprobe_lock held */
/* Drains and frees every instance on rp's free list. */
static void free_urp_inst(struct uretprobe *rp)
	struct uretprobe_instance *ri;
	while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
		hlist_del(&ri->uflist);
440 #define COMMON_URP_NR 10
442 static int alloc_nodes_uretprobe(struct uretprobe *rp)
445 struct uretprobe_instance *inst;
448 #if 1 /* def CONFIG_PREEMPT */
449 rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
451 rp->maxacpptive += NR_CPUS;
453 alloc_nodes = COMMON_URP_NR;
455 for (i = 0; i < alloc_nodes; ++i) {
456 inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
461 INIT_HLIST_NODE(&inst->uflist);
462 hlist_add_head(&inst->uflist, &rp->free_instances);
/* Called with uretprobe_lock held */
/* Gets a free instance, replenishing the free list via allocation if empty. */
static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
	struct uretprobe_instance *ri;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
	/* free list exhausted: try to allocate more instances */
	if (!alloc_nodes_uretprobe(rp)) {
		swap_hlist_for_each_entry(ri, node,
					  &rp->free_instances, uflist) {
487 /* =================================================================== */
 * @brief Registers uprobe.
 *
 * @param p Pointer to the uprobe to register.
 * @return 0 on success,\n
 * negative error code on error.
 */
int swap_register_uprobe(struct uprobe *p)
	struct uprobe *old_p;

	p->ainsn.insn = NULL;
	INIT_LIST_HEAD(&p->list);
#ifdef KPROBES_PROFILE
	p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
	p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;

	/* get the first item */
	old_p = get_uprobe(p->addr, p->task->tgid);
		/* a probe already exists at this (addr, tgid) */
		struct task_struct *task = p->task;

		/* TODO: add support many uprobes on address */
		printk(KERN_INFO "uprobe on task[%u %u %s] vaddr=%p is there\n",
		       task->tgid, task->pid, task->comm, p->addr);
		/* chain the new probe onto the existing/aggregate probe */
		ret = register_aggr_uprobe(old_p, p);
		DBPRINTF("goto out\n", ret);

	INIT_HLIST_NODE(&p->is_hlist);

	/* decode/copy the original instruction for single-stepping */
	ret = arch_prepare_uprobe(p);
		DBPRINTF("goto out\n", ret);

	DBPRINTF("before out ret = 0x%x\n", ret);

	/* TODO: add uprobe (must be in function) */
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);

	/* error unwind: remove from the hash table again */
	hlist_del_rcu(&p->hlist);

	DBPRINTF("out ret = 0x%x\n", ret);
EXPORT_SYMBOL_GPL(swap_register_uprobe);
 * @brief Unregisters uprobe.
 *
 * @param p Pointer to the uprobe.
 * @param disarm Disarm flag. When true uprobe is disarmed.
 */
void __swap_unregister_uprobe(struct uprobe *p, int disarm)
	struct uprobe *old_p, *list_p;

	/* we MUST check probe for uncreated process */
	old_p = get_uprobe(p->addr, p->task->tgid);
	if (unlikely(!old_p))

	/* confirm p really is registered at this address */
	list_for_each_entry_rcu(list_p, &old_p->list, list) {
		/* uprobe p is a valid probe */

	/* either p is the only probe here, or it is the sole child of an
	 * aggregate probe — in both cases the hash entry itself goes away */
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
	    (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		disarm_uprobe(p, p->task);
		hlist_del_rcu(&old_p->hlist);
		list_del_rcu(&p->list);
	/* otherwise just unlink p from the aggregate's child list */
	list_del_rcu(&p->list);

	/* drop aggregate handlers that no remaining child needs */
	if (p->break_handler)
		old_p->break_handler = NULL;
	if (p->post_handler) {
		list_for_each_entry_rcu(list_p, &old_p->list, list) {
			if (list_p->post_handler) {
		old_p->post_handler = NULL;
EXPORT_SYMBOL_GPL(__swap_unregister_uprobe);
 * @brief Unregisters uprobe. Main interface function, wrapper for
 * __swap_unregister_uprobe.
 *
 * @param up Pointer to the uprobe.
 */
void swap_unregister_uprobe(struct uprobe *up)
	/* disarm flag = 1: always restore the original instruction */
	__swap_unregister_uprobe(up, 1);
 * @brief Registers ujprobe.
 *
 * @param jp Pointer to the ujprobe function.
 * @return 0 on success,\n
 * error code on error.
 */
int swap_register_ujprobe(struct ujprobe *jp)
	/* Todo: Verify probepoint is a function entry point */
	/* jprobe semantics are built on the generic uprobe handlers */
	jp->up.pre_handler = setjmp_upre_handler;
	jp->up.break_handler = longjmp_break_uhandler;

	ret = swap_register_uprobe(&jp->up);
EXPORT_SYMBOL_GPL(swap_register_ujprobe);
 * @brief Unregisters ujprobe.
 *
 * @param jp Pointer to the ujprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
 */
void __swap_unregister_ujprobe(struct ujprobe *jp, int disarm)
	__swap_unregister_uprobe(&jp->up, disarm);
EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);

 * @brief Unregisters ujprobe. Main interface function, wrapper for
 * __swap_unregister_ujprobe.
 *
 * @param jp Pointer to the ujprobe.
 */
void swap_unregister_ujprobe(struct ujprobe *jp)
	__swap_unregister_ujprobe(jp, 1);
EXPORT_SYMBOL_GPL(swap_unregister_ujprobe);
 * @brief Trampoline uprobe handler.
 *
 * Fires when a probed function returns through the uretprobe trampoline:
 * runs the user return-handlers, recycles the instances, and restores the
 * real return address.
 *
 * @param p Pointer to the uprobe.
 * @param regs Pointer to CPU register data.
 */
int trampoline_uprobe_handler(struct uprobe *p, struct pt_regs *regs)
	struct uretprobe_instance *ri = NULL;
	struct hlist_head *head;
	unsigned long flags, tramp_addr, orig_ret_addr = 0;
	struct hlist_node *tmp;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	tramp_addr = arch_get_trampoline_addr(p, regs);
	spin_lock_irqsave(&uretprobe_lock, flags);

	head = uretprobe_inst_table_head(current->mm);

	 * It is possible to have multiple instances associated with a given
	 * task either because an multiple functions in the call path
	 * have a return probe installed on them, and/or more then one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       uretprobe_trampoline
	 */
	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current) {
			/* another task is sharing our hash bucket */

		ri->rp->handler(ri, regs);

		orig_ret_addr = (unsigned long)ri->ret_addr;
		recycle_urp_inst(ri);

		if ((orig_ret_addr != tramp_addr && up == p) || up == NULL) {
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */

	spin_unlock_irqrestore(&uretprobe_lock, flags);
	/* orig_ret_addr is NULL when there is no need to restore anything
	 * (all the magic is performed inside handler) */
	if (likely(orig_ret_addr))
		arch_set_orig_ret_addr(orig_ret_addr, regs);
/*
 * Entry-side handler installed by swap_register_uretprobe(): grabs a free
 * instance, runs the optional entry_handler, and swaps the return address
 * for the trampoline via arch_prepare_uretprobe().
 */
static int pre_handler_uretprobe(struct uprobe *p, struct pt_regs *regs)
	struct uretprobe *rp = container_of(p, struct uretprobe, up);
	/* ARM-specific: some call sites are known never to return */
	int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
	struct uretprobe_instance *ri;

	/* TODO: consider to only swap the
	 * RA after the last pre_handler fired */
	spin_lock_irqsave(&uretprobe_lock, flags);

	/* TODO: test - remove retprobe after func entry but before its exit */
	ri = get_free_urp_inst(rp);
	ri->preload_thumb = 0;

	if (rp->entry_handler)
		rp->entry_handler(ri, regs);

	/* hijack the return address; undo on failure */
	ret = arch_prepare_uretprobe(ri, regs);
		recycle_urp_inst(ri);

	spin_unlock_irqrestore(&uretprobe_lock, flags);
 * @brief Registers uretprobe.
 *
 * @param rp Pointer to the uretprobe.
 * @return 0 on success,\n
 * negative error code on error.
 */
int swap_register_uretprobe(struct uretprobe *rp)
	struct uretprobe_instance *inst;

	/* the embedded uprobe fires on function entry */
	rp->up.pre_handler = pre_handler_uretprobe;
	rp->up.post_handler = NULL;
	rp->up.fault_handler = NULL;
	rp->up.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#if 1 /* def CONFIG_PREEMPT */
		rp->maxactive = max(10, 2 * NR_CPUS);
		rp->maxactive = NR_CPUS;

	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);

	for (i = 0; i < rp->maxactive; i++) {
		/* data_size bytes of per-instance payload follow the struct */
		inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_KERNEL);
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);

	/* Establish function entry probe point */
	ret = swap_register_uprobe(&rp->up);

	arch_opcode_analysis_uretprobe(rp);
EXPORT_SYMBOL_GPL(swap_register_uretprobe);
 * @brief Unregisters uretprobe.
 *
 * @param rp Pointer to the uretprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe
 */
void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
	struct uretprobe_instance *ri;

	__swap_unregister_uprobe(&rp->up, disarm);

	spin_lock_irqsave(&uretprobe_lock, flags);
	while ((ri = get_used_urp_inst(rp)) != NULL) {
		bool is_current = ri->task == current;

		/* drop the lock: disarming may touch another task's memory */
		spin_unlock_irqrestore(&uretprobe_lock, flags);

		/* FIXME: arch_disarm_urp_inst() for no current context */
		if (arch_disarm_urp_inst(ri, ri->task, 0) != 0)
			printk(KERN_INFO "%s (%d/%d): "
			       "cannot disarm urp instance (%08lx)\n",
			       ri->task->comm, ri->task->tgid, ri->task->pid,
			       (unsigned long)rp->up.addr);

		spin_lock_irqsave(&uretprobe_lock, flags);

		recycle_urp_inst(ri);

	/* unlink whatever instances remain in use */
	while ((ri = get_used_urp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
	spin_unlock_irqrestore(&uretprobe_lock, flags);
EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);
 * @brief Unregisters uretprobe. Main interface function, wrapper for
 * __swap_unregister_uretprobe.
 *
 * @param rp Pointer to the uretprobe.
 */
void swap_unregister_uretprobe(struct uretprobe *rp)
	__swap_unregister_uretprobe(rp, 1);
EXPORT_SYMBOL_GPL(swap_unregister_uretprobe);
 * @brief Unregisters all uprobes for task's thread group ID.
 *
 * @param task Pointer to the task_struct
 */
void swap_unregister_all_uprobes(struct task_struct *task)
	struct hlist_head *head;
	struct hlist_node *tnode;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* walk every bucket; _safe variant because entries are removed */
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
		head = &uprobe_table[i];
		swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
			if (p->task->tgid == task->tgid) {
				printk(KERN_INFO "%s: delete uprobe at %p[%lx]"
				       " for %s/%d\n", __func__, p->addr,
				       (unsigned long)p->opcode,
				       task->comm, task->pid);
				swap_unregister_uprobe(p);
EXPORT_SYMBOL_GPL(swap_unregister_all_uprobes);
 * @brief Arch-independent wrapper for arch_ujprobe_return.
 */
void swap_ujprobe_return(void)
	arch_ujprobe_return();
EXPORT_SYMBOL_GPL(swap_ujprobe_return);
/*
 * Snapshots a live uretprobe_instance into a standalone urinst_info so the
 * hijacked return address can be restored later, after the instance itself
 * has been recycled. Returns the new record (presumably NULL on allocation
 * failure — the error branch is reported via pr_err below).
 */
static struct urinst_info *urinst_info_create(struct uretprobe_instance *ri)
	struct urinst_info *urinst;

	/* GFP_ATOMIC: called under uretprobe_lock */
	urinst = kmalloc(sizeof(*urinst), GFP_ATOMIC);
	INIT_HLIST_NODE(&urinst->hlist);
	urinst->task = ri->task;
	urinst->sp = (unsigned long)ri->sp;
	urinst->tramp = arch_tramp_by_ri(ri);
	urinst->ret_addr = (unsigned long)ri->ret_addr;
	/* allocation-failure path */
	pr_err("Cannot allocate memory for urinst\n");

/* Frees a urinst_info created by urinst_info_create(). */
static void urinst_info_destroy(struct urinst_info *urinst)
/*
 * Restores the original return address recorded in urinst by rebuilding a
 * temporary uretprobe_instance and delegating to arch_disarm_urp_inst().
 */
static void urinst_info_disarm(struct urinst_info *urinst, struct task_struct *task)
	struct uretprobe_instance ri;
	unsigned long tramp = urinst->tramp;

	/* set necessary data*/
	ri.task = urinst->task;
	ri.sp = (kprobe_opcode_t *)urinst->sp;
	ri.ret_addr = (kprobe_opcode_t *)urinst->ret_addr;

	arch_disarm_urp_inst(&ri, task, tramp);
/*
 * Collects urinst_info snapshots for all of current's active uretprobe
 * instances onto @head (preserving their order), optionally recycling the
 * originals. Counterpart of urinst_info_put_current_hlist().
 */
void urinst_info_get_current_hlist(struct hlist_head *head, bool recycle)
	struct task_struct *task = current;
	struct uretprobe_instance *ri;
	struct hlist_head *hhead;
	struct hlist_node *n;
	struct hlist_node *last = NULL;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	spin_lock_irqsave(&uretprobe_lock, flags);
	hhead = uretprobe_inst_table_head(task->mm);
	swap_hlist_for_each_entry_safe(ri, node, n, hhead, hlist) {
		if (task == ri->task) {
			struct urinst_info *urinst;

			urinst = urinst_info_create(ri);
			/* keep original ordering: append after previous one.
			 * NOTE(review): hlist_add_after() was renamed
			 * hlist_add_behind() (with swapped args) in kernel
			 * 3.17 — verify against the target kernel. */
			hlist_add_after(last, &urinst->hlist);
			/* first entry goes at the head */
			hlist_add_head(&urinst->hlist, head);

			last = &urinst->hlist;

			recycle_urp_inst(ri);

	spin_unlock_irqrestore(&uretprobe_lock, flags);
EXPORT_SYMBOL_GPL(urinst_info_get_current_hlist);
/*
 * Disarms (conditionally) and frees every urinst_info on @head, emptying
 * the list built by urinst_info_get_current_hlist().
 */
void urinst_info_put_current_hlist(struct hlist_head *head,
				   struct task_struct *task)
	struct urinst_info *urinst;
	struct hlist_node *tmp;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	swap_hlist_for_each_entry_safe(urinst, node, tmp, head, hlist) {
		/* check on disarm */
		urinst_info_disarm(urinst, task);

		hlist_del(&urinst->hlist);
		urinst_info_destroy(urinst);
EXPORT_SYMBOL_GPL(urinst_info_put_current_hlist);
/* One-time module setup: initializes all three hash tables. */
static int once(void)
	init_uprobe_table();
	init_uprobes_insn_slots();
	init_uretprobe_inst_table();

/* Module init/exit wiring for the SWAP loader framework. */
SWAP_LIGHT_INIT_MODULE(once, swap_arch_init_uprobes, swap_arch_exit_uprobes,

MODULE_LICENSE("GPL");