[FIX] remove uprobe from hash table on error
[kernel/swap-modules.git] / uprobe / swap_uprobes.c
1 /**
2  * uprobe/swap_uprobes.c
3  * @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space Probes initial
4  * implementation; Support x86/ARM/MIPS for both user and kernel spaces.
5  * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for
6  * separating core and arch parts
7  *
8  * @section LICENSE
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  * @section COPYRIGHT
25  *
26  * Copyright (C) Samsung Electronics, 2006-2010
27  *
28  * @section DESCRIPTION
29  *
30  * Uprobes implementation.
31  */
32
33
34 #include <linux/hash.h>
35 #include <linux/mempolicy.h>
36 #include <linux/module.h>
37
38 #include <master/swap_initializer.h>
39 #include <kprobe/swap_slots.h>
40 #include <kprobe/swap_kdebug.h>
41 #include <kprobe/swap_kprobes_deps.h>
42
43 #include <swap-asm/swap_uprobes.h>
44
45 #include "swap_uprobes.h"
46
47
enum {
	UPROBE_HASH_BITS  = 10,
	UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
};

/* Hash table of probes keyed by instruction-slot address (see
 * add_uprobe_table() / get_ukprobe_by_insn_slot()) */
struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
/* Hash table of registered uprobes keyed by probe address */
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];

DEFINE_SPINLOCK(uretprobe_lock);	/* Protects uretprobe_inst_table */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];

/* Set to 1 to compile in print_uprobe_hash_table() */
#define DEBUG_PRINT_HASH_TABLE 0
60
#if DEBUG_PRINT_HASH_TABLE
/* Debug helper: dump every entry of the insn-slot hash table to dmesg */
void print_uprobe_hash_table(void)
{
	int i;
	struct hlist_head *head;
	struct kprobe *p;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* print uprobe table */
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
		head = &uprobe_insn_slot_table[i];
		swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
			printk(KERN_INFO "####### find U tgid=%u, addr=%x\n",
					p->tgid, p->addr);
		}
	}
}
#endif
79
/*
 * Keep all fields in the uprobe consistent: copy the saved original
 * opcode and the arch-specific instruction context (plus the ARM
 * safety flags) from old_p into p.
 */
static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
#ifdef CONFIG_ARM
	p->safe_arm = old_p->safe_arm;
	p->safe_thumb = old_p->safe_thumb;
#endif
}
92
93 /*
94  * Aggregate handlers for multiple uprobes support - these handlers
95  * take care of invoking the individual uprobe handlers on p->list
96  */
97 static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
98 {
99         struct kprobe *kp;
100         int ret;
101
102         list_for_each_entry_rcu(kp, &p->list, list) {
103                 if (kp->pre_handler) {
104                         ret = kp->pre_handler(kp, regs);
105                         if (ret)
106                                 return ret;
107                 }
108         }
109
110         return 0;
111 }
112
113 static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs,
114                                unsigned long flags)
115 {
116         struct kprobe *kp;
117
118         list_for_each_entry_rcu(kp, &p->list, list) {
119                 if (kp->post_handler)
120                         kp->post_handler(kp, regs, flags);
121         }
122 }
123
/* Aggregate fault handler: faults during user-probe handling are
 * deliberately ignored (0 = not handled here) */
static int aggr_fault_uhandler(struct kprobe *p,
                               struct pt_regs *regs,
                               int trapnr)
{
        return 0;
}
130
/* Aggregate break handler: no-op placeholder installed by
 * add_new_uprobe() when a chained probe has a break_handler */
static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        return 0;
}
135
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second ujprobe at the address - two ujprobes can't coexist
 */
static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		/* only one probe with a break handler may live on the list */
		if (old_p->break_handler)
			return -EEXIST;

		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_uhandler;
	} else {
		list_add_rcu(&p->list, &old_p->list);
	}

	/* first chained probe with a post handler enables dispatching */
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_uhandler;

	return 0;
}
157
/*
 * Fill in the required fields of the "manager uprobe". Replace the
 * earlier uprobe in the hlist with the manager uprobe
 */
static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_uprobe(p, ap);

	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_uhandler;
	ap->fault_handler = aggr_fault_uhandler;

	if (p->post_handler)
		ap->post_handler = aggr_post_uhandler;

	if (p->break_handler)
		ap->break_handler = aggr_break_uhandler;

	/* chain the original probe onto the manager's list ... */
	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	/* ... and swap the hash-table entry to the manager, RCU-safely */
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
181
/*
 * This is the second or subsequent uprobe at the address - handle
 * the intricacies
 */
static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_uhandler) {
		/* old_p is already a manager probe - just chain p onto it */
		copy_uprobe(old_p, p);
		ret = add_new_uprobe(old_p, p);
	} else {
		/* first conflict: allocate a manager probe and chain both */
		struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
		if (!uap)
			return -ENOMEM;

		uap->task = kp2up(p)->task;
		ap = up2kp(uap);
		add_aggr_uprobe(ap, old_p);
		copy_uprobe(ap, p);
		/*
		 * NOTE(review): if add_new_uprobe() fails here (-EEXIST),
		 * the freshly allocated manager probe stays in the hash
		 * table and uap is never freed - verify whether an unwind
		 * path is needed.
		 */
		ret = add_new_uprobe(ap, p);
	}

	return ret;
}
208
/*
 * Plant the breakpoint instruction at the probe address inside the
 * target task's address space.  write_proc_vm_atomic() returns 0 when
 * the store fails (same convention as in disarm_uprobe() below), which
 * is mapped to -EACCES here.
 */
static int arm_uprobe(struct uprobe *p)
{
	kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
	int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
				       &insn, sizeof(insn));
	if (!ret) {
		printk("arm_uprobe: failed to write memory "
		       "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);

		return -EACCES;
	}

	return 0;
}
223
/**
 * @brief Disarms uprobe: writes the saved original opcode back over the
 * breakpoint in the target task's address space.
 *
 * @param p Pointer to the uprobe's kprobe.
 * @param task Pointer to the target task.
 * @return Void.  A failed write is only logged; there is nothing the
 * caller could do about it.
 */
void disarm_uprobe(struct kprobe *p, struct task_struct *task)
{
	int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
				       &p->opcode, sizeof(p->opcode));
	if (!ret) {
		printk("disarm_uprobe: failed to write memory "
		       "tgid=%u, addr=%p!\n", task->tgid, p->addr);
	}
}
EXPORT_SYMBOL_GPL(disarm_uprobe);
241
242 static void init_uprobes_insn_slots(void)
243 {
244         int i;
245         for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
246                 INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
247 }
248
249 static void init_uprobe_table(void)
250 {
251         int i;
252         for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
253                 INIT_HLIST_HEAD(&uprobe_table[i]);
254 }
255
256 static void init_uretprobe_inst_table(void)
257 {
258         int i;
259         for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
260                 INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
261 }
262
/**
 * @brief Gets uprobe's kprobe.
 *
 * @param addr Probe's address.
 * @param tgid Probe's thread group ID.
 * @return Pointer to the kprobe on success,\n
 * NULL otherwise.
 */
struct kprobe *get_ukprobe(void *addr, pid_t tgid)
{
	struct hlist_head *head;
	struct kprobe *p;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* bucket is chosen by address; entries must match both the
	 * address and the owning process (tgid) */
	head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
	swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr && kp2up(p)->task->tgid == tgid)
			return p;
	}

	return NULL;
}
285
/**
 * @brief Adds uprobe to hlist when trampoline have been made.
 *
 * The probe is hashed by the address of its instruction slot
 * (p->ainsn.insn), so it can later be found from the slot address
 * by get_ukprobe_by_insn_slot().
 *
 * @param p Pointer to the uprobe's kprobe.
 * @return Void.
 */
void add_uprobe_table(struct kprobe *p)
{
	hlist_add_head_rcu(&p->is_hlist,
			   &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn,
							    UPROBE_HASH_BITS)]);
}
298
/**
 * @brief Gets kprobe by insn slot.
 *
 * @param addr Instruction-slot address.
 * @param tgid Probe's thread group ID.
 * @param regs Pointer to CPU registers data (unused here).
 * @return Pointer to the kprobe on success,\n
 * NULL otherwise.
 */
struct kprobe *get_ukprobe_by_insn_slot(void *addr,
					pid_t tgid,
					struct pt_regs *regs)
{
	struct hlist_head *head;
	struct kprobe *p;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* TODO: test - two processes invokes instrumented function */
	head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
	swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
		if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid)
			return p;
	}

	return NULL;
}
325
326
/* Release the arch-specific resources of @up (e.g. its insn slot) */
static void remove_uprobe(struct uprobe *up)
{
	arch_remove_uprobe(up);
}
331
/* Map a hash key (the task's mm pointer at both call sites) to its
 * bucket in uretprobe_inst_table */
static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
{
	return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}
336
/* Move a uretprobe instance from the free list onto both the global
 * instance hash table (keyed by ri->task->mm) and its uretprobe's
 * used list.  Called with uretprobe_lock held. */
static void add_urp_inst(struct uretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}
354
/* Return a fired (or aborted) instance to its uretprobe's free list,
 * unlinking it from the hash table and the used list first.  A NULL
 * ri->rp means the owning uretprobe was already unregistered, in which
 * case there is nothing to recycle.  Called with uretprobe_lock held. */
static void recycle_urp_inst(struct uretprobe_instance *ri)
{
	if (ri->rp) {
		hlist_del(&ri->hlist);
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	}
}
367
/* Return the first instance on rp's used list, or NULL if the list is
 * empty (the loop body returns on the first iteration).
 * Called with uretprobe_lock held. */
static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
{
	struct uretprobe_instance *ri;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
		return ri;
	}

	return NULL;
}
380
/**
 * @brief Gets free uretprobe instance for the specified uretprobe without
 * allocation. Called with uretprobe_lock held.
 *
 * Returns the first entry of rp's free list, or NULL if the list is
 * empty (the loop body returns on the first iteration).
 *
 * @param rp Pointer to the uretprobe.
 * @return Pointer to the uretprobe_instance on success,\n
 * NULL otherwise.
 */
struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
{
	struct uretprobe_instance *ri;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
		return ri;
	}

	return NULL;
}
400
401 /* Called with uretprobe_lock held */
402 static void free_urp_inst(struct uretprobe *rp)
403 {
404         struct uretprobe_instance *ri;
405         while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
406                 hlist_del(&ri->uflist);
407                 kfree(ri);
408         }
409 }
410
411 #define COMMON_URP_NR 10
412
413 static int alloc_nodes_uretprobe(struct uretprobe *rp)
414 {
415         int alloc_nodes;
416         struct uretprobe_instance *inst;
417         int i;
418
419 #if 1 /* def CONFIG_PREEMPT */
420         rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
421 #else
422         rp->maxacpptive += NR_CPUS;
423 #endif
424         alloc_nodes = COMMON_URP_NR;
425
426         for (i = 0; i < alloc_nodes; ++i) {
427                 inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
428                 if (inst == NULL) {
429                         free_urp_inst(rp);
430                         return -ENOMEM;
431                 }
432                 INIT_HLIST_NODE(&inst->uflist);
433                 hlist_add_head(&inst->uflist, &rp->free_instances);
434         }
435
436         return 0;
437 }
438
/* Like get_free_urp_inst_no_alloc(), but tops up the free list via
 * alloc_nodes_uretprobe() when it is empty and retries once.
 * Called with uretprobe_lock held. */
static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
{
	struct uretprobe_instance *ri;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
		return ri;
	}

	/* free list empty - try to allocate more instances */
	if (!alloc_nodes_uretprobe(rp)) {
		swap_hlist_for_each_entry(ri, node,
					  &rp->free_instances, uflist) {
			return ri;
		}
	}

	return NULL;
}
458 /* =================================================================== */
459
/**
 * @brief Registers uprobe.
 *
 * Initializes the kprobe part of @up, inserts it into the uprobe hash
 * table and arms the breakpoint in the target task.  If arming fails,
 * the probe is removed from the hash table again before returning.
 *
 * @param up Pointer to the uprobe to register.
 * @return 0 on success,\n
 * negative error code on error.
 */
int swap_register_uprobe(struct uprobe *up)
{
	int ret = 0;
	struct kprobe *p, *old_p;

	p = &up->kp;
	if (!p->addr)
		return -EINVAL;

	DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);

/* thumb address = address-1; */
#if defined(CONFIG_ARM)
	/* TODO: must be corrected in 'bundle' */
	if ((unsigned long) p->addr & 0x01)
		p->addr = (kprobe_opcode_t *)((unsigned long)p->addr &
					      0xfffffffe);
#endif

	p->ainsn.insn = NULL;
	p->mod_refcounted = 0;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
#ifdef KPROBES_PROFILE
	p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
	p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
	p->count = 0;
#endif

	/* get the first item */
	old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
	if (old_p) {
		struct task_struct *task = up->task;

		/* TODO: add support many uprobes on address */
		printk(KERN_INFO "uprobe on task[%u %u %s] vaddr=%p is there\n",
		       task->tgid, task->pid, task->comm, p->addr);
		ret = -EINVAL;
		goto out;
		/*
		 * NOTE(review): everything below in this branch is
		 * unreachable because of the goto above - the aggregate
		 * registration path is deliberately disabled (see TODO).
		 */
#ifdef CONFIG_ARM
		p->safe_arm = old_p->safe_arm;
		p->safe_thumb = old_p->safe_thumb;
#endif
		ret = register_aggr_uprobe(old_p, p);
		DBPRINTF("goto out\n", ret);
		goto out;
	}

	INIT_HLIST_NODE(&p->is_hlist);

	/* build the arch-specific instruction slot / single-step context */
	ret = arch_prepare_uprobe(up);
	if (ret) {
		DBPRINTF("goto out\n", ret);
		goto out;
	}

	DBPRINTF("before out ret = 0x%x\n", ret);

	/* TODO: add uprobe (must be in function) */
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);

	/* if the breakpoint cannot be planted, undo the hash insertion
	 * (waiting for RCU readers) and release arch resources */
	ret = arm_uprobe(up);
	if (ret) {
		hlist_del_rcu(&p->hlist);
		synchronize_rcu();
		remove_uprobe(up);
	}

out:
	DBPRINTF("out ret = 0x%x\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(swap_register_uprobe);
542
/**
 * @brief Unregisters uprobe.
 *
 * Handles both the plain case (p is the only probe at the address) and
 * the aggregate case (p is chained on a manager probe's list).
 *
 * @param up Pointer to the uprobe.
 * @param disarm Disarm flag. When true uprobe is disarmed.
 * @return Void.
 */
void __swap_unregister_uprobe(struct uprobe *up, int disarm)
{
	struct kprobe *p, *old_p, *list_p;
	int cleanup_p;

	p = &up->kp;
	old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
	if (unlikely(!old_p))
		return;

	if (p != old_p) {
		/* p should be chained on the manager probe old_p - make
		 * sure it really is before touching anything */
		list_for_each_entry_rcu(list_p, &old_p->list, list) {
			if (list_p == p) {
				/* uprobe p is a valid probe */
				goto valid_p;
			}
		}

		return;
	}
	/* p == old_p: falls through to valid_p */

valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
	    (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		if (disarm)
			disarm_uprobe(&up->kp, up->task);

		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		/* other probes remain chained - just unlink p */
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	if (cleanup_p) {
		if (p != old_p) {
			/* old_p is the kzalloc'ed manager probe - free it */
			list_del_rcu(&p->list);
			kfree(old_p);
		}

		/* wait for RCU readers unless we are in atomic context */
		if (!in_atomic())
			synchronize_sched();

		remove_uprobe(up);
	} else {
		if (p->break_handler)
			old_p->break_handler = NULL;

		if (p->post_handler) {
			/* drop the aggregate post handler only when no
			 * remaining chained probe still needs it */
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}

			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(__swap_unregister_uprobe);
613
/**
 * @brief Unregisters uprobe. Main interface function, wrapper for
 * __swap_unregister_uprobe with disarming enabled.
 *
 * @param up Pointer to the uprobe.
 * @return Void.
 */
void swap_unregister_uprobe(struct uprobe *up)
{
	__swap_unregister_uprobe(up, 1);
}
625
/**
 * @brief Registers ujprobe.
 *
 * Installs the jprobe-style entry/break handlers on the embedded
 * uprobe and registers it.
 *
 * @param jp Pointer to the ujprobe.
 * @return 0 on success,\n
 * error code on error.
 */
int swap_register_ujprobe(struct ujprobe *jp)
{
	int ret = 0;

	/* Todo: Verify probepoint is a function entry point */
	jp->up.kp.pre_handler = setjmp_upre_handler;
	jp->up.kp.break_handler = longjmp_break_uhandler;

	ret = swap_register_uprobe(&jp->up);

	return ret;
}
EXPORT_SYMBOL_GPL(swap_register_ujprobe);
646
/**
 * @brief Unregisters ujprobe.
 *
 * @param jp Pointer to the ujprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
 * @return Void.
 */
void __swap_unregister_ujprobe(struct ujprobe *jp, int disarm)
{
	__swap_unregister_uprobe(&jp->up, disarm);
	/*
	 * Here is an attempt to unregister even those probes that have not been
	 * installed (hence not added to the hlist).
	 * So if we try to delete them from the hlist we will get NULL pointer
	 * dereference error. That is why we check whether this node
	 * really belongs to the hlist.
	 */
	if (!(hlist_unhashed(&jp->up.kp.is_hlist)))
		hlist_del_rcu(&jp->up.kp.is_hlist);
}
EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);
668
/**
 * @brief Unregisters ujprobe. Main interface function, wrapper for
 * __swap_unregister_ujprobe with disarming enabled.
 *
 * @param jp Pointer to the ujprobe.
 * @return Void.
 */
void swap_unregister_ujprobe(struct ujprobe *jp)
{
	__swap_unregister_ujprobe(jp, 1);
}
EXPORT_SYMBOL_GPL(swap_unregister_ujprobe);
681
/**
 * @brief Trampoline uprobe handler.
 *
 * Runs when a probed function returns through the uretprobe trampoline:
 * invokes the user return-handlers recorded for the current task,
 * recycles their instances and restores the original return address.
 *
 * @param p Pointer to the uprobe's kprobe.
 * @param regs Pointer to CPU register data.
 * @return 1
 */
int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct uretprobe_instance *ri = NULL;
	struct kprobe *kp;
	struct hlist_head *head;
	unsigned long flags, tramp_addr, orig_ret_addr = 0;
	struct hlist_node *tmp;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	tramp_addr = arch_get_trampoline_addr(p, regs);
	spin_lock_irqsave(&uretprobe_lock, flags);

	/* instances are hashed by the owning task's mm */
	head = uretprobe_inst_table_head(current->mm);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because an multiple functions in the call path
	 * have a return probe installed on them, and/or more then one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       uretprobe_trampoline
	 */
	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current) {
			/* another task is sharing our hash bucket */
			continue;
		}

		kp = NULL;
		if (ri->rp) {
			kp = up2kp(&ri->rp->up);

			if (ri->rp->handler)
				ri->rp->handler(ri, regs);
		}

		orig_ret_addr = (unsigned long)ri->ret_addr;
		recycle_urp_inst(ri);

		if ((orig_ret_addr != tramp_addr && kp == p) || kp == NULL) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	spin_unlock_irqrestore(&uretprobe_lock, flags);
	/* orig_ret_addr is NULL when there is no need to restore anything
	 * (all the magic is performed inside handler) */
	if (likely(orig_ret_addr))
		arch_set_orig_ret_addr(orig_ret_addr, regs);

	return 1;
}
751
/*
 * Entry handler installed by swap_register_uretprobe(): grabs a free
 * instance, runs the user entry_handler, and swaps the return address
 * for the trampoline via arch_prepare_uretprobe().  Always returns 0
 * so normal probe processing continues.
 */
static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct uprobe *up = container_of(p, struct uprobe, kp);
	struct uretprobe *rp = container_of(up, struct uretprobe, up);
#ifdef CONFIG_ARM
	/* per-ISA "function never returns" flag set by opcode analysis */
	int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
#endif
	struct uretprobe_instance *ri;
	unsigned long flags;

#ifdef CONFIG_ARM
	if (noret)
		return 0;
#endif

	/* TODO: consider to only swap the
	 * RA after the last pre_handler fired */
	spin_lock_irqsave(&uretprobe_lock, flags);

	/* TODO: test - remove retprobe after func entry but before its exit */
	ri = get_free_urp_inst(rp);
	if (ri != NULL) {
		int ret;

		ri->rp = rp;
		ri->task = current;
#ifdef CONFIG_ARM
		ri->preload_thumb = 0;
#endif

		if (rp->entry_handler)
			rp->entry_handler(ri, regs);

		/* the instance is added first so that recycle_urp_inst()
		 * can unlink it again if arch preparation failed */
		ret = arch_prepare_uretprobe(ri, regs);
		add_urp_inst(ri);
		if (ret) {
			recycle_urp_inst(ri);
			++rp->nmissed;
		}
	} else {
		/* no free instance available - count the miss */
		++rp->nmissed;
	}

	spin_unlock_irqrestore(&uretprobe_lock, flags);

	return 0;
}
799
800 /**
801  * @brief Registers uretprobe.
802  *
803  * @param rp Pointer to the uretprobe.
804  * @return 0 on success,\n
805  * negative error code on error.
806  */
807 int swap_register_uretprobe(struct uretprobe *rp)
808 {
809         int i, ret = 0;
810         struct uretprobe_instance *inst;
811
812         DBPRINTF("START\n");
813
814         rp->up.kp.pre_handler = pre_handler_uretprobe;
815         rp->up.kp.post_handler = NULL;
816         rp->up.kp.fault_handler = NULL;
817         rp->up.kp.break_handler = NULL;
818
819         /* Pre-allocate memory for max kretprobe instances */
820         if (rp->maxactive <= 0) {
821 #if 1 /* def CONFIG_PREEMPT */
822                 rp->maxactive = max(10, 2 * NR_CPUS);
823 #else
824                 rp->maxactive = NR_CPUS;
825 #endif
826         }
827
828         INIT_HLIST_HEAD(&rp->used_instances);
829         INIT_HLIST_HEAD(&rp->free_instances);
830
831         for (i = 0; i < rp->maxactive; i++) {
832                 inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
833                 if (inst == NULL) {
834                         free_urp_inst(rp);
835                         return -ENOMEM;
836                 }
837
838                 INIT_HLIST_NODE(&inst->uflist);
839                 hlist_add_head(&inst->uflist, &rp->free_instances);
840         }
841
842         rp->nmissed = 0;
843
844         /* Establish function entry probe point */
845         ret = swap_register_uprobe(&rp->up);
846         if (ret)
847                 return ret;
848
849         arch_opcode_analysis_uretprobe(rp);
850
851         return 0;
852 }
853 EXPORT_SYMBOL_GPL(swap_register_uretprobe);
854
/**
 * @brief Disarms uretprobe instances for the specified child task.
 *
 * Walks the parent's bucket of pending return instances and disarms
 * each one in the context of @task (used e.g. after fork, when the
 * child inherits trampoline return addresses).
 *
 * @param parent Pointer to the parent task struct.
 * @param task Pointer to the child task struct.
 * @return 0
 */
int swap_disarm_urp_inst_for_task(struct task_struct *parent,
				  struct task_struct *task)
{
	unsigned long flags;
	struct uretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *tmp;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	spin_lock_irqsave(&uretprobe_lock, flags);

	/* instances are hashed by the parent's mm */
	head = uretprobe_inst_table_head(parent->mm);
	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (parent == ri->task)
			arch_disarm_urp_inst(ri, task);
	}

	spin_unlock_irqrestore(&uretprobe_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(swap_disarm_urp_inst_for_task);
884
/**
 * @brief Disarms and recycles all pending uretprobe instances belonging
 * to @task (e.g. when the task exits with probed calls still on its
 * stack).
 *
 * @param task Pointer to the task_struct.
 * @return Void.
 */
void swap_discard_pending_uretprobes(struct task_struct *task)
{
	unsigned long flags;
	struct uretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *tmp;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	spin_lock_irqsave(&uretprobe_lock, flags);

	head = uretprobe_inst_table_head(task->mm);
	swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == task) {
			printk(KERN_INFO "%s (%d/%d): pending urp inst: %08lx\n",
			       task->comm, task->tgid, task->pid,
			       (unsigned long)ri->rp->up.kp.addr);
			arch_disarm_urp_inst(ri, task);
			recycle_urp_inst(ri);
		}
	}

	spin_unlock_irqrestore(&uretprobe_lock, flags);
}
EXPORT_SYMBOL_GPL(swap_discard_pending_uretprobes);
915
/**
 * @brief Unregisters uretprobe.
 *
 * Removes the entry probe, disarms and recycles every pending instance,
 * detaches instances that could not be recycled, and finally frees the
 * whole free list.
 *
 * @param rp Pointer to the uretprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe
 * @return Void.
 */
void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
{
	unsigned long flags;
	struct uretprobe_instance *ri;

	__swap_unregister_uprobe(&rp->up, disarm);
	spin_lock_irqsave(&uretprobe_lock, flags);

	/* first pass: disarm and recycle every used instance */
	while ((ri = get_used_urp_inst(rp)) != NULL) {
		if (arch_disarm_urp_inst(ri, ri->task) != 0)
			printk(KERN_INFO "%s (%d/%d): "
			       "cannot disarm urp instance (%08lx)\n",
			       ri->task->comm, ri->task->tgid, ri->task->pid,
			       (unsigned long)rp->up.kp.addr);
		recycle_urp_inst(ri);
	}

	if (hlist_empty(&rp->used_instances)) {
		struct kprobe *p = &rp->up.kp;

		/* only unlink if the node was ever hashed (see
		 * __swap_unregister_ujprobe for rationale) */
		if (!(hlist_unhashed(&p->is_hlist)))
			hlist_del_rcu(&p->is_hlist);
	}

	/* second pass: orphan anything still on the used list so the
	 * instances cannot reach the freed uretprobe */
	while ((ri = get_used_urp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}

	spin_unlock_irqrestore(&uretprobe_lock, flags);
	free_urp_inst(rp);
}
EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);
956
/**
 * @brief Unregisters uretprobe. Main interface function, wrapper for
 * __swap_unregister_uretprobe with disarming enabled.
 *
 * @param rp Pointer to the uretprobe.
 * @return Void.
 */
void swap_unregister_uretprobe(struct uretprobe *rp)
{
	__swap_unregister_uretprobe(rp, 1);
}
EXPORT_SYMBOL_GPL(swap_unregister_uretprobe);
969
/**
 * @brief Unregisters all uprobes for task's thread group ID.
 *
 * Scans every bucket of the uprobe hash table and unregisters each
 * probe owned by @task's thread group (safe iteration, since
 * swap_unregister_uprobe() removes entries while we walk).
 *
 * @param task Pointer to the task_struct
 * @return Void.
 */
void swap_unregister_all_uprobes(struct task_struct *task)
{
	struct hlist_head *head;
	struct kprobe *p;
	int i;
	struct hlist_node *tnode;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
		head = &uprobe_table[i];
		swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
			if (kp2up(p)->task->tgid == task->tgid) {
				struct uprobe *up =
					container_of(p, struct uprobe, kp);
				printk(KERN_INFO "%s: delete uprobe at %p[%lx]"
				       " for %s/%d\n", __func__, p->addr,
				       (unsigned long)p->opcode,
				       task->comm, task->pid);
				swap_unregister_uprobe(up);
			}
		}
	}
}
EXPORT_SYMBOL_GPL(swap_unregister_all_uprobes);
1000
/**
 * @brief Arch-independent wrapper for arch_ujprobe_return.
 *
 * Called from ujprobe entry handlers to return to the original code.
 *
 * @return Void.
 */
void swap_ujprobe_return(void)
{
	arch_ujprobe_return();
}
EXPORT_SYMBOL_GPL(swap_ujprobe_return);
1011
/* One-time module initialization: empty all three hash tables.
 * Always succeeds (returns 0). */
static int once(void)
{
	init_uprobe_table();
	init_uprobes_insn_slots();
	init_uretprobe_inst_table();

	return 0;
}
1020
/* Hook once() plus the arch init/exit routines into the SWAP
 * initializer framework */
SWAP_LIGHT_INIT_MODULE(once, swap_arch_init_uprobes, swap_arch_exit_uprobes,
		       NULL, NULL);

MODULE_LICENSE("GPL");