[REFACTOR] create restore_opcode_for_thumb()
/*
 *  Dynamic Binary Instrumentation Module based on KProbes
 *  uprobe/swap_uprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2006-2010
 *
 * 2008-2009    Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
 *              Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
 * 2010         Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
 *
 */

#include "swap_uprobes.h"
#include "dbi_kdebug.h"

#include <asm/swap_uprobes.h>

#include <linux/hash.h>
#include <linux/mempolicy.h>
#include <linux/module.h>
#include <dbi_insn_slots.h>

enum {
        UPROBE_HASH_BITS  = 10,
        UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
};

struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
struct hlist_head uprobe_insn_pages;

DEFINE_SPINLOCK(uretprobe_lock);        /* Protects uretprobe_inst_table */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];

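/*
 * All three tables are open-hashed by pointer value: uprobe_table keys
 * probes by the probed address, uprobe_insn_slot_table keys the same
 * probes by the address of their out-of-line instruction slot (so the
 * single-step/trampoline fault path can find its probe again), and
 * uretprobe_inst_table keys return-probe instances by the task's mm.
 */
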
#define DEBUG_PRINT_HASH_TABLE 0

#if DEBUG_PRINT_HASH_TABLE
void print_kprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        /* print kprobe table */
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find K tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}

void print_kretprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        /* print kretprobe table */
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kretprobe_inst_table[i];
                hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find KR tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}

void print_uprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        /* print uprobe table */
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_insn_slot_table[i];
                hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find U tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}
#endif

/*
 * Keep all fields in the uprobe consistent
 */
static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
        p->ss_addr = old_p->ss_addr;
#ifdef CONFIG_ARM
        p->safe_arm = old_p->safe_arm;
        p->safe_thumb = old_p->safe_thumb;
#endif
}

/*
 * Aggregate handlers for multiple uprobes support - these handlers
 * take care of invoking the individual uprobe handlers on p->list
 */
static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;
        int ret;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        ret = kp->pre_handler(kp, regs);
                        if (ret) {
                                return ret;
                        }
                }
        }

        return 0;
}

static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        kp->post_handler(kp, regs, flags);
                }
        }
}

static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
        return 0;
}

static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        return 0;
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second ujprobe at the address - two ujprobes can't coexist
 */
static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler) {
                        return -EEXIST;
                }

                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_uhandler;
        } else {
                list_add_rcu(&p->list, &old_p->list);
        }

        if (p->post_handler && !old_p->post_handler) {
                old_p->post_handler = aggr_post_uhandler;
        }

        return 0;
}

/*
 * Fill in the required fields of the "manager uprobe". Replace the
 * earlier uprobe in the hlist with the manager uprobe
 */
static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_uprobe(p, ap);

        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_uhandler;
        ap->fault_handler = aggr_fault_uhandler;

        if (p->post_handler) {
                ap->post_handler = aggr_post_uhandler;
        }

        if (p->break_handler) {
                ap->break_handler = aggr_break_uhandler;
        }

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent uprobe at the address - handle
 * the intricacies
 */
static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_uhandler) {
                copy_uprobe(old_p, p);
                ret = add_new_uprobe(old_p, p);
        } else {
                struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
                if (!uap) {
                        return -ENOMEM;
                }

                uap->task = kp2up(p)->task;
                ap = up2kp(uap);
                add_aggr_uprobe(ap, old_p);
                copy_uprobe(ap, p);
                ret = add_new_uprobe(ap, p);
        }

        return ret;
}

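/*
 * Aggregation in a nutshell: the first probe at an address sits directly
 * in uprobe_table; registering a second probe at the same (addr, task)
 * makes register_aggr_uprobe() splice both onto a "manager" probe whose
 * aggr_*_uhandler callbacks fan out to every probe on ->list.  A hedged
 * sketch ('target', 'func_addr', 'h1', 'h2' are hypothetical):
 *
 *      up1.task = up2.task = target;
 *      up1.kp.addr = up2.kp.addr = (kprobe_opcode_t *)func_addr;
 *      up1.kp.pre_handler = h1;
 *      up2.kp.pre_handler = h2;
 *      dbi_register_uprobe(&up1, 0);   // goes straight into uprobe_table
 *      dbi_register_uprobe(&up2, 0);   // routed via register_aggr_uprobe()
 *      // one breakpoint now dispatches both handlers
 */
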
static void arm_uprobe(struct uprobe *p)
{
        kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
        int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
                                       &insn, sizeof(insn));
        if (!ret) {
                panic("arm_uprobe: failed to write memory "
                      "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
        }
}

void disarm_uprobe(struct uprobe *p)
{
        int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
                                       &p->kp.opcode, sizeof(p->kp.opcode));
        if (!ret) {
                panic("disarm_uprobe: failed to write memory "
                      "tgid=%u, addr=%p!\n", p->task->tgid, p->kp.addr);
        }
}
EXPORT_SYMBOL_GPL(disarm_uprobe);

static void init_uprobes_insn_slots(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
        }
}

static void init_uprobe_table(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                INIT_HLIST_HEAD(&uprobe_table[i]);
        }
}

static void init_uretprobe_inst_table(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
        }
}

struct kprobe *get_ukprobe(void *addr, pid_t tgid)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
        }

        return NULL;
}

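/*
 * Lookups are keyed by (addr, tgid): the same virtual address may be
 * probed independently in different processes.  Typical call, as done in
 * dbi_register_uprobe() below:
 *
 *      old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
 *      if (old_p)
 *              ret = register_aggr_uprobe(old_p, p);   // already probed
 */
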
static void add_uprobe_table(struct kprobe *p)
{
#ifdef CONFIG_ARM
        INIT_HLIST_NODE(&p->is_hlist_arm);
        hlist_add_head_rcu(&p->is_hlist_arm,
                           &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn_arm, UPROBE_HASH_BITS)]);
        INIT_HLIST_NODE(&p->is_hlist_thumb);
        hlist_add_head_rcu(&p->is_hlist_thumb,
                           &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn_thumb, UPROBE_HASH_BITS)]);
#else /* CONFIG_ARM */
        INIT_HLIST_NODE(&p->is_hlist);
        hlist_add_head_rcu(&p->is_hlist,
                           &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
#endif /* CONFIG_ARM */
}

#ifdef CONFIG_ARM
static struct kprobe *get_ukprobe_bis_arm(void *addr, pid_t tgid)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                if (p->ainsn.insn_arm == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
        }

        return NULL;
}

static struct kprobe *get_ukprobe_bis_thumb(void *addr, pid_t tgid)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, is_hlist_thumb) {
                if (p->ainsn.insn_thumb == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
        }

        return NULL;
}

struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
{
        return thumb_mode(regs) ?
                        get_ukprobe_bis_thumb(addr - 0x1a, tgid) :
                        get_ukprobe_bis_arm(addr - 4 * UPROBES_TRAMP_RET_BREAK_IDX, tgid);
}
#else /* CONFIG_ARM */
struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, is_hlist) {
                if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
        }

        return NULL;
}
#endif /* CONFIG_ARM */

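/*
 * Why look up by slot address: when the probed task traps on the break
 * planted in its single-step slot/trampoline, the faulting address lies
 * inside p->ainsn.insn rather than at p->addr.  On ARM the helper above
 * rewinds that address by the break's offset within the trampoline
 * (4 * UPROBES_TRAMP_RET_BREAK_IDX in ARM mode, 0x1a in Thumb mode)
 * before matching it against the slot base.
 */
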
static void remove_uprobe(struct uprobe *up)
{
        struct kprobe *p = &up->kp;
        struct task_struct *task = up->task;

#ifdef CONFIG_ARM
        free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn_arm);
        free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn_thumb);
#else /* CONFIG_ARM */
        free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn);
#endif /* CONFIG_ARM */
}

static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
{
        return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}

/* Called with uretprobe_lock held */
static void add_urp_inst(struct uretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del(&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE(&ri->hlist);
        hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE(&ri->uflist);
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with uretprobe_lock held */
static void recycle_urp_inst(struct uretprobe_instance *ri)
{
        if (ri->rp) {
                hlist_del(&ri->hlist);
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        }
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
{
        struct hlist_node *node;
        struct uretprobe_instance *ri;

        hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
                return ri;
        }

        return NULL;
}

/* Called with uretprobe_lock held */
struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
{
        struct hlist_node *node;
        struct uretprobe_instance *ri;

        hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        return NULL;
}

/* Called with uretprobe_lock held */
static void free_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

#define COMMON_URP_NR 10

static int alloc_nodes_uretprobe(struct uretprobe *rp)
{
        int alloc_nodes;
        struct uretprobe_instance *inst;
        int i;

#if 1 /* def CONFIG_PREEMPT */
        rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
#else
        rp->maxactive += NR_CPUS;
#endif
        alloc_nodes = COMMON_URP_NR;

        for (i = 0; i < alloc_nodes; ++i) {
                inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        return 0;
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
{
        struct hlist_node *node;
        struct uretprobe_instance *ri;

        hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        if (!alloc_nodes_uretprobe(rp)) {
                hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                        return ri;
                }
        }

        return NULL;
}
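
/*
 * Instance lifecycle: dbi_register_uretprobe() below pre-allocates
 * rp->maxactive uretprobe_instance nodes on free_instances.  On function
 * entry, pre_handler_uretprobe() moves one to used_instances and into
 * the per-mm hash; on return, trampoline_uprobe_handler() recycles it
 * back.  If the free list runs dry (deep recursion, many concurrent
 * calls), get_free_urp_inst() tops it up with GFP_ATOMIC allocations;
 * otherwise the miss is counted in rp->nmissed.
 */
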
/* =================================================================== */

int dbi_register_uprobe(struct uprobe *up, int atomic)
{
        int ret = 0;
        struct kprobe *p, *old_p;

        p = &up->kp;
        if (!p->addr) {
                return -EINVAL;
        }

        DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);

        /* thumb address = address - 1 */
#if defined(CONFIG_ARM)
        /* TODO: must be corrected in 'bundle' */
        if ((unsigned long)p->addr & 0x01) {
                p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
        }
#endif

        p->mod_refcounted = 0;
        p->nmissed = 0;
        INIT_LIST_HEAD(&p->list);
#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif

        /* get the first item */
        old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
        if (old_p) {
#ifdef CONFIG_ARM
                p->safe_arm = old_p->safe_arm;
                p->safe_thumb = old_p->safe_thumb;
#endif
                ret = register_aggr_uprobe(old_p, p);
                if (!ret) {
                        /* atomic_inc(&kprobe_count); */
                        add_uprobe_table(p);
                }
                DBPRINTF("goto out, ret = 0x%x\n", ret);
                goto out;
        }

        ret = arch_prepare_uprobe(up, atomic);
        if (ret) {
                DBPRINTF("goto out, ret = 0x%x\n", ret);
                goto out;
        }

        DBPRINTF("before out ret = 0x%x\n", ret);

        /* TODO: add uprobe (must be in function) */
        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist, &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
        add_uprobe_table(p);
        arm_uprobe(up);

out:
        DBPRINTF("out ret = 0x%x\n", ret);
        return ret;
}

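/*
 * A minimal caller-side sketch ('target', 'vaddr', 'my_pre' are
 * hypothetical; error handling elided).  A uprobe is keyed by both
 * address and task, so up->task must be set before registering:
 *
 *      static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *      {
 *              return 0;       // 0 lets the probed task continue
 *      }
 *
 *      struct uprobe up = {
 *              .task = target,                            // probed task
 *              .kp = {
 *                      .addr = (kprobe_opcode_t *)vaddr,  // user-space VA
 *                      .pre_handler = my_pre,
 *              },
 *      };
 *      ret = dbi_register_uprobe(&up, 0);
 *      ...
 *      dbi_unregister_uprobe(&up, 0);
 */
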
void dbi_unregister_uprobe(struct uprobe *up, int atomic)
{
        struct kprobe *p, *old_p, *list_p;
        int cleanup_p;

        p = &up->kp;
        old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
        if (unlikely(!old_p)) {
                return;
        }

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list) {
                        if (list_p == p) {
                                /* uprobe p is a valid probe */
                                goto valid_p;
                        }
                }

                return;
        }

valid_p:
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
            (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
                /* Only probe on the hash list */
                disarm_uprobe(up);
                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        if (cleanup_p) {
                if (p != old_p) {
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }

                if (!in_atomic()) {
                        synchronize_sched();
                }

                remove_uprobe(up);
        } else {
                if (p->break_handler) {
                        old_p->break_handler = NULL;
                }

                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }

                        if (cleanup_p == 0) {
                                old_p->post_handler = NULL;
                        }
                }
        }
}

int dbi_register_ujprobe(struct ujprobe *jp, int atomic)
{
        int ret = 0;

        /* TODO: verify probepoint is a function entry point */
        jp->up.kp.pre_handler = setjmp_upre_handler;
        jp->up.kp.break_handler = longjmp_break_uhandler;

        ret = dbi_register_uprobe(&jp->up, atomic);

        return ret;
}

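/*
 * A ujprobe behaves like a kernel jprobe for user space: its entry
 * handler runs with the probed function's arguments and must finish with
 * dbi_uprobe_return() so that longjmp_break_uhandler can restore the
 * original context.  A hedged sketch ('target', 'func_vaddr', 'my_entry'
 * are hypothetical; the .entry field name follows the jprobes convention
 * and is assumed here):
 *
 *      static void my_entry(unsigned long arg0, unsigned long arg1)
 *      {
 *              // inspect the arguments here
 *              dbi_uprobe_return();    // mandatory
 *      }
 *
 *      struct ujprobe jp = {
 *              .up.task = target,
 *              .up.kp.addr = (kprobe_opcode_t *)func_vaddr,
 *              .entry = my_entry,      // assumed field
 *      };
 *      ret = dbi_register_ujprobe(&jp, 0);
 */
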
void dbi_unregister_ujprobe(struct ujprobe *jp, int atomic)
{
        dbi_unregister_uprobe(&jp->up, atomic);
        /*
         * We may be asked to unregister probes that were never actually
         * installed (and hence never added to the hlist).  Deleting such
         * a node from the hlist would dereference a NULL pointer, so
         * first check that the node is really hashed.
         */
#ifdef CONFIG_ARM
        if (!hlist_unhashed(&jp->up.kp.is_hlist_arm)) {
                hlist_del_rcu(&jp->up.kp.is_hlist_arm);
        }
        if (!hlist_unhashed(&jp->up.kp.is_hlist_thumb)) {
                hlist_del_rcu(&jp->up.kp.is_hlist_thumb);
        }
#else /* CONFIG_ARM */
        if (!hlist_unhashed(&jp->up.kp.is_hlist)) {
                hlist_del_rcu(&jp->up.kp.is_hlist);
        }
#endif /* CONFIG_ARM */
}

int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct uretprobe_instance *ri = NULL;
        struct hlist_head *head;
        struct hlist_node *node, *tmp;
        unsigned long flags, tramp_addr, orig_ret_addr = 0;

        tramp_addr = arch_get_trampoline_addr(p, regs);
        spin_lock_irqsave(&uretprobe_lock, flags);

        head = uretprobe_inst_table_head(current->mm);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       uretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current) {
                        /* another task is sharing our hash bucket */
                        continue;
                }

                if (ri->rp && ri->rp->handler) {
                        ri->rp->handler(ri, regs, ri->rp->priv_arg);
                }

                orig_ret_addr = (unsigned long)ri->ret_addr;
                recycle_urp_inst(ri);

                if (orig_ret_addr != tramp_addr) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
                }
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        arch_set_orig_ret_addr(orig_ret_addr, regs);

        return 1;
}

static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
{
        struct uprobe *up = container_of(p, struct uprobe, kp);
        struct uretprobe *rp = container_of(up, struct uretprobe, up);
        struct uretprobe_instance *ri;
        unsigned long flags;

        /* TODO: consider swapping the RA only after the last pre_handler has fired */
        spin_lock_irqsave(&uretprobe_lock, flags);

        /* TODO: test - remove retprobe after func entry but before its exit */
        if ((ri = get_free_urp_inst(rp)) != NULL) {
                ri->rp = rp;
                ri->task = current;

                arch_prepare_uretprobe_hl(ri, regs);

                add_urp_inst(ri);
        } else {
                ++rp->nmissed;
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);

        return 0;
}

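/*
 * Entry/exit pairing: pre_handler_uretprobe() runs at function entry and
 * lets arch code swap the return address for the trampoline;
 * trampoline_uprobe_handler() then fires when the function returns
 * through it.  The rp->handler callback therefore observes return-time
 * state.  A hypothetical handler (the (ri, regs, priv) signature is
 * inferred from the ri->rp->handler(...) call above; the return type is
 * assumed):
 *
 *      static int my_ret_handler(struct uretprobe_instance *ri,
 *                                struct pt_regs *regs, void *priv)
 *      {
 *              // regs holds the probed function's return state;
 *              // ri->ret_addr is the original return address
 *              return 0;
 *      }
 */
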
int dbi_register_uretprobe(struct uretprobe *rp, int atomic)
{
        int i, ret = 0;
        struct uretprobe_instance *inst;

        DBPRINTF("START\n");

        rp->up.kp.pre_handler = pre_handler_uretprobe;
        rp->up.kp.post_handler = NULL;
        rp->up.kp.fault_handler = NULL;
        rp->up.kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#if 1 /* def CONFIG_PREEMPT */
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }

        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);

        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(*inst), GFP_KERNEL);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;

        /* Establish function entry probe point */
        ret = dbi_register_uprobe(&rp->up, atomic);
        if (ret) {
                free_urp_inst(rp);
                goto out;
        }

out:
        return ret;
}

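/*
 * A minimal uretprobe sketch ('target', 'func_vaddr', 'my_ret_handler'
 * are hypothetical; maxactive == 0 lets the code above pick a default):
 *
 *      struct uretprobe rp = {
 *              .up.task = target,
 *              .up.kp.addr = (kprobe_opcode_t *)func_vaddr,
 *              .handler = my_ret_handler,
 *              .maxactive = 0,
 *      };
 *      ret = dbi_register_uretprobe(&rp, 0);
 *      ...
 *      dbi_unregister_uretprobe(&rp, 0);
 */
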
int dbi_disarm_urp_inst(struct uretprobe_instance *ri, struct task_struct *rm_task)
{
        struct task_struct *task = rm_task ? rm_task : ri->task;
        kprobe_opcode_t *tramp;
        kprobe_opcode_t *sp = (kprobe_opcode_t *)((long)ri->sp & ~1);
        kprobe_opcode_t *stack = sp - RETPROBE_STACK_DEPTH + 1;
        kprobe_opcode_t *found = NULL;
        kprobe_opcode_t *buf[RETPROBE_STACK_DEPTH];
        int i, retval;

        /* Understand function mode */
        if ((long)ri->sp & 1) {
                tramp = (kprobe_opcode_t *)
                        ((unsigned long)ri->rp->up.kp.ainsn.insn + 0x1b);
        } else {
                tramp = (kprobe_opcode_t *)
                        (ri->rp->up.kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
        }

        retval = read_proc_vm_atomic(task, (unsigned long)stack, buf, sizeof(buf));
        if (retval != sizeof(buf)) {
                printk("---> %s (%d/%d): failed to read stack from %08lx\n",
                        task->comm, task->tgid, task->pid, (unsigned long)stack);
                retval = -EFAULT;
                goto out;
        }

        /* search the stack from the bottom */
        for (i = RETPROBE_STACK_DEPTH - 1; i >= 0; i--) {
                if (buf[i] == tramp) {
                        found = stack + i;
                        break;
                }
        }

        if (found) {
                printk("---> %s (%d/%d): trampoline found at %08lx (%08lx /%+d) - %p\n",
                                task->comm, task->tgid, task->pid,
                                (unsigned long)found, (unsigned long)sp,
                                found - sp, ri->rp->up.kp.addr);
                retval = write_proc_vm_atomic(task, (unsigned long)found, &ri->ret_addr,
                                sizeof(ri->ret_addr));
                if (retval != sizeof(ri->ret_addr)) {
                        printk("---> %s (%d/%d): failed to write value to %08lx\n",
                                task->comm, task->tgid, task->pid, (unsigned long)found);
                        retval = -EFAULT;
                } else {
                        retval = 0;
                }
        } else {
                struct pt_regs *uregs = task_pt_regs(ri->task);
                unsigned long ra = dbi_get_ret_addr(uregs);
                if (ra == (unsigned long)tramp) {
                        printk("---> %s (%d/%d): trampoline found at lr = %08lx - %p\n",
                                        task->comm, task->tgid, task->pid, ra, ri->rp->up.kp.addr);
                        /* restore the original return address, not the trampoline */
                        dbi_set_ret_addr(uregs, (unsigned long)ri->ret_addr);
                        retval = 0;
                } else {
                        printk("---> %s (%d/%d): trampoline NOT found at sp = %08lx, lr = %08lx - %p\n",
                                        task->comm, task->tgid, task->pid,
                                        (unsigned long)sp, ra, ri->rp->up.kp.addr);
                        retval = -ENOENT;
                }
        }

out:
        return retval;
}

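/*
 * Disarm strategy: bit 0 of ri->sp records whether the probed function
 * was entered in Thumb or ARM mode, which selects the matching
 * trampoline address inside the probe's instruction slot.  A window of
 * RETPROBE_STACK_DEPTH words around the recorded sp is scanned for that
 * trampoline value, and the first hit is overwritten with the saved
 * original return address.  If the return address was never spilled to
 * the stack, it is still in the return-address register, so the saved
 * pt_regs are patched instead.
 */
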
/* Called with uretprobe_lock held */
int dbi_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
{
        struct uretprobe_instance *ri;
        struct hlist_node *node, *tmp;
        struct hlist_head *head = uretprobe_inst_table_head(parent->mm);

        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (parent == ri->task) {
                        dbi_disarm_urp_inst(ri, task);
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dbi_disarm_urp_inst_for_task);

void dbi_unregister_uretprobe(struct uretprobe *rp, int atomic)
{
        unsigned long flags;
        struct uretprobe_instance *ri;

        spin_lock_irqsave(&uretprobe_lock, flags);

        while ((ri = get_used_urp_inst(rp)) != NULL) {
                if (dbi_disarm_urp_inst(ri, NULL) != 0) {
                        printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
                                        ri->task->comm, ri->task->tgid, ri->task->pid,
                                        (unsigned long)rp->up.kp.addr);
                }
                recycle_urp_inst(ri);
        }

        if (hlist_empty(&rp->used_instances)) {
                struct kprobe *p = &rp->up.kp;
#ifdef CONFIG_ARM
                if (!hlist_unhashed(&p->is_hlist_arm)) {
                        hlist_del_rcu(&p->is_hlist_arm);
                }

                if (!hlist_unhashed(&p->is_hlist_thumb)) {
                        hlist_del_rcu(&p->is_hlist_thumb);
                }
#else /* CONFIG_ARM */
                if (!hlist_unhashed(&p->is_hlist)) {
                        hlist_del_rcu(&p->is_hlist);
                }
#endif /* CONFIG_ARM */
        }

        while ((ri = get_used_urp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        free_urp_inst(rp);

        dbi_unregister_uprobe(&rp->up, atomic);
}

void dbi_unregister_all_uprobes(struct task_struct *task, int atomic)
{
        struct hlist_head *head;
        struct hlist_node *node, *tnode;
        struct kprobe *p;
        int i;

        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_table[i];
                hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
                        if (kp2up(p)->task->tgid == task->tgid) {
                                struct uprobe *up = container_of(p, struct uprobe, kp);
                                printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",
                                                p->addr, (unsigned long)p->opcode, task->comm, task->pid);
                                dbi_unregister_uprobe(up, atomic);
                        }
                }
        }
}

void dbi_uprobe_return(void)
{
        dbi_arch_uprobe_return();
}

static int __init init_uprobes(void)
{
        init_uprobe_table();
        init_uprobes_insn_slots();
        init_uretprobe_inst_table();

        return swap_arch_init_uprobes();
}

static void __exit exit_uprobes(void)
{
        swap_arch_exit_uprobes();
}

EXPORT_SYMBOL_GPL(dbi_uprobe_return);
EXPORT_SYMBOL_GPL(dbi_register_ujprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_ujprobe);
EXPORT_SYMBOL_GPL(dbi_register_uretprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_uretprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_all_uprobes);

module_init(init_uprobes);
module_exit(exit_uprobes);

MODULE_LICENSE("GPL");