[REFACTOR] remove unnecessary function declarations
[kernel/swap-modules.git] / uprobe / swap_uprobes.c
/*
 *  Dynamic Binary Instrumentation Module based on KProbes
 *  modules/uprobe/swap_uprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2006-2010
 *
 * 2008-2009    Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
 *              Probes initial implementation; support x86/ARM/MIPS for both user and kernel spaces.
 * 2010         Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
 *
 */

#include "swap_uprobes.h"
#include <kprobe/dbi_kdebug.h>

#include <uprobe/arch/asm/swap_uprobes.h>

#include <linux/hash.h>
#include <linux/mempolicy.h>
#include <linux/module.h>
#include <kprobe/swap_slots.h>
#include <kprobe/dbi_kprobes_deps.h>

enum {
        UPROBE_HASH_BITS  = 10,
        UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
};

struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];

DEFINE_SPINLOCK(uretprobe_lock);        /* Protects uretprobe_inst_table */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];

#define DEBUG_PRINT_HASH_TABLE 0

#if DEBUG_PRINT_HASH_TABLE
void print_kprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* print kprobe table */
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kprobe_table[i];
                swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find K tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}

void print_kretprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* print kretprobe table */
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kretprobe_inst_table[i];
                swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find KR tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}

void print_uprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* print uprobe insn slot table (nodes are linked via is_hlist) */
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_insn_slot_table[i];
                swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
                        printk("####### find U tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}
#endif

/*
 * Keep all fields in the uprobe consistent
 */
static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
#ifdef CONFIG_ARM
        p->safe_arm = old_p->safe_arm;
        p->safe_thumb = old_p->safe_thumb;
#endif
}

/*
 * Aggregate handlers for multiple uprobes support - these handlers
 * take care of invoking the individual uprobe handlers on p->list
 */
static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;
        int ret;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        ret = kp->pre_handler(kp, regs);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler)
                        kp->post_handler(kp, regs, flags);
        }
}

static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
        return 0;
}

static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        return 0;
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second ujprobe at the address - two ujprobes can't coexist
 */
static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler)
                        return -EEXIST;

                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_uhandler;
        } else {
                list_add_rcu(&p->list, &old_p->list);
        }

        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_uhandler;

        return 0;
}

/*
 * Fill in the required fields of the "manager uprobe". Replace the
 * earlier uprobe in the hlist with the manager uprobe
 */
static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_uprobe(p, ap);

        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_uhandler;
        ap->fault_handler = aggr_fault_uhandler;

        if (p->post_handler)
                ap->post_handler = aggr_post_uhandler;

        if (p->break_handler)
                ap->break_handler = aggr_break_uhandler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent uprobe at the address - handle
 * the intricacies
 */
static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_uhandler) {
                copy_uprobe(old_p, p);
                ret = add_new_uprobe(old_p, p);
        } else {
                struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
                if (!uap)
                        return -ENOMEM;

                uap->task = kp2up(p)->task;
                ap = up2kp(uap);
                add_aggr_uprobe(ap, old_p);
                copy_uprobe(ap, p);
                ret = add_new_uprobe(ap, p);
        }

        return ret;
}

static void arm_uprobe(struct uprobe *p)
{
        kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
        int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
                                       &insn, sizeof(insn));
        if (!ret) {
                panic("arm_uprobe: failed to write memory "
                      "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
        }
}

void disarm_uprobe(struct kprobe *p, struct task_struct *task)
{
        int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
                                       &p->opcode, sizeof(p->opcode));
        if (!ret) {
                panic("disarm_uprobe: failed to write memory "
                      "tgid=%u, addr=%p!\n", task->tgid, p->addr);
        }
}
EXPORT_SYMBOL_GPL(disarm_uprobe);

static void init_uprobes_insn_slots(void)
{
        int i;

        for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
                INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
}

static void init_uprobe_table(void)
{
        int i;

        for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
                INIT_HLIST_HEAD(&uprobe_table[i]);
}

static void init_uretprobe_inst_table(void)
{
        int i;

        for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
                INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
}

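/*
 * Look up a registered uprobe by probed address. The table is keyed by
 * the hash of the address; entries in a bucket are disambiguated by the
 * tgid of the traced task, so the same address can be probed
 * independently in different processes.
 */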
struct kprobe *get_ukprobe(void *addr, pid_t tgid)
{
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr && kp2up(p)->task->tgid == tgid)
                        return p;
        }

        return NULL;
}

void add_uprobe_table(struct kprobe *p)
{
        INIT_HLIST_NODE(&p->is_hlist);
        hlist_add_head_rcu(&p->is_hlist,
                           &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn,
                                                            UPROBE_HASH_BITS)]);
}

struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
{
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
                if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid)
                        return p;
        }

        return NULL;
}

static void remove_uprobe(struct uprobe *up)
{
        struct kprobe *p = up2kp(up);

        swap_slot_free(up->sm, p->ainsn.insn);
}

static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
{
        return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}

/* Called with uretprobe_lock held */
static void add_urp_inst(struct uretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del(&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE(&ri->hlist);
        hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE(&ri->uflist);
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with uretprobe_lock held */
static void recycle_urp_inst(struct uretprobe_instance *ri)
{
        if (ri->rp) {
                hlist_del(&ri->hlist);
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        }
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
                return ri;
        }

        return NULL;
}

/* Called with uretprobe_lock held */
struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        return NULL;
}

/* Called with uretprobe_lock held */
static void free_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;

        while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

#define COMMON_URP_NR 10

static int alloc_nodes_uretprobe(struct uretprobe *rp)
{
        int alloc_nodes;
        struct uretprobe_instance *inst;
        int i;

#if 1 /* def CONFIG_PREEMPT */
        rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
#else
        rp->maxactive += NR_CPUS;
#endif
        alloc_nodes = COMMON_URP_NR;

        for (i = 0; i < alloc_nodes; ++i) {
                inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        return 0;
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        if (!alloc_nodes_uretprobe(rp)) {
                swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                        return ri;
                }
        }

        return NULL;
}

/* =================================================================== */

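/*
 * Register a probe at a user-space address. The caller fills in
 * up->kp.addr and up->task. If another probe already exists at the same
 * (addr, tgid) pair the new one is chained onto it via
 * register_aggr_uprobe(); otherwise an instruction slot is prepared by
 * the arch code and the breakpoint is written into the target process.
 * Returns 0 on success or a negative errno.
 */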
int dbi_register_uprobe(struct uprobe *up)
{
        int ret = 0;
        struct kprobe *p, *old_p;

        p = &up->kp;
        if (!p->addr)
                return -EINVAL;

        DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);

#if defined(CONFIG_ARM)
        /* Thumb address = address - 1; TODO: must be corrected in 'bundle' */
        if ((unsigned long)p->addr & 0x01)
                p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
#endif

        p->ainsn.insn = NULL;
        p->mod_refcounted = 0;
        p->nmissed = 0;
        INIT_LIST_HEAD(&p->list);
#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif

        /* get the first item */
        old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
        if (old_p) {
#ifdef CONFIG_ARM
                p->safe_arm = old_p->safe_arm;
                p->safe_thumb = old_p->safe_thumb;
#endif
                ret = register_aggr_uprobe(old_p, p);
                DBPRINTF("goto out, ret = %d\n", ret);
                goto out;
        }

        ret = arch_prepare_uprobe(up);
        if (ret) {
                DBPRINTF("goto out, ret = %d\n", ret);
                goto out;
        }

        DBPRINTF("before out ret = 0x%x\n", ret);

        /* TODO: add uprobe (must be in function) */
        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                           &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
        arm_uprobe(up);

out:
        DBPRINTF("out ret = 0x%x\n", ret);
        return ret;
}

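/*
 * Remove a previously registered probe. @disarm selects whether the
 * original instruction is restored in the target process first (it is
 * skipped when the target's memory can no longer be written). Both
 * standalone probes and probes chained under an aggregate handler are
 * handled; the aggregate is freed once its last member is gone.
 */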
void __dbi_unregister_uprobe(struct uprobe *up, int disarm)
{
        struct kprobe *p, *old_p, *list_p;
        int cleanup_p;

        p = &up->kp;
        old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
        if (unlikely(!old_p))
                return;

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list) {
                        if (list_p == p) {
                                /* uprobe p is a valid probe */
                                goto valid_p;
                        }
                }

                return;
        }

valid_p:
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
            (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
                /* Only probe on the hash list */
                if (disarm)
                        disarm_uprobe(&up->kp, up->task);

                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        if (cleanup_p) {
                if (p != old_p) {
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }

                if (!in_atomic())
                        synchronize_sched();

                remove_uprobe(up);
        } else {
                if (p->break_handler)
                        old_p->break_handler = NULL;

                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }

                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
        }
}
EXPORT_SYMBOL_GPL(__dbi_unregister_uprobe);

void dbi_unregister_uprobe(struct uprobe *up)
{
        __dbi_unregister_uprobe(up, 1);
}

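/*
 * Register a "jumper" probe (the user-space analogue of a jprobe): the
 * setjmp/longjmp handler pair detours execution through the client's
 * entry handler, which gives control back via swap_ujprobe_return().
 */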
int dbi_register_ujprobe(struct ujprobe *jp)
{
        int ret = 0;

        /* TODO: verify probe point is a function entry point */
        jp->up.kp.pre_handler = setjmp_upre_handler;
        jp->up.kp.break_handler = longjmp_break_uhandler;

        ret = dbi_register_uprobe(&jp->up);

        return ret;
}

void __dbi_unregister_ujprobe(struct ujprobe *jp, int disarm)
{
        __dbi_unregister_uprobe(&jp->up, disarm);
        /*
         * This may be called even for probes that were never installed
         * (and hence never added to the hlist). Deleting such a node
         * from the hlist would dereference a NULL pointer, so check
         * first whether the node is really hashed.
         */
        if (!hlist_unhashed(&jp->up.kp.is_hlist))
                hlist_del_rcu(&jp->up.kp.is_hlist);
}
EXPORT_SYMBOL_GPL(__dbi_unregister_ujprobe);

void dbi_unregister_ujprobe(struct ujprobe *jp)
{
        __dbi_unregister_ujprobe(jp, 1);
}

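/*
 * Hit when a probed user function returns through the trampoline that
 * arch_prepare_uretprobe() planted in place of the real return address:
 * walk this mm's instance bucket, run the client handlers, recycle the
 * instances and restore the original return address.
 */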
int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct uretprobe_instance *ri = NULL;
        struct kprobe *kp;
        struct hlist_head *head;
        unsigned long flags, tramp_addr, orig_ret_addr = 0;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        tramp_addr = arch_get_trampoline_addr(p, regs);
        spin_lock_irqsave(&uretprobe_lock, flags);

        head = uretprobe_inst_table_head(current->mm);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       uretprobe_trampoline
         */
        swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current) {
                        /* another task is sharing our hash bucket */
                        continue;
                }

                kp = NULL;
                if (ri->rp) {
                        kp = up2kp(&ri->rp->up);

                        if (ri->rp->handler)
                                ri->rp->handler(ri, regs);
                }

                orig_ret_addr = (unsigned long)ri->ret_addr;
                recycle_urp_inst(ri);

                if ((orig_ret_addr != tramp_addr && kp == p) || kp == NULL) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
                }
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        arch_set_orig_ret_addr(orig_ret_addr, regs);

        return 1;
}

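/*
 * Entry-side handler of a uretprobe: grab a free instance, run the
 * client's entry_handler, and let the arch code swap the return address
 * for the trampoline. If no instance is available the hit is only
 * counted in rp->nmissed.
 */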
static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
{
        struct uprobe *up = container_of(p, struct uprobe, kp);
        struct uretprobe *rp = container_of(up, struct uretprobe, up);
#ifdef CONFIG_ARM
        int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
#endif
        struct uretprobe_instance *ri;
        unsigned long flags;

#ifdef CONFIG_ARM
        if (noret)
                return 0;
#endif

        /* TODO: consider to only swap the RA after the last pre_handler fired */
        spin_lock_irqsave(&uretprobe_lock, flags);

        /* TODO: test - remove retprobe after func entry but before its exit */
        ri = get_free_urp_inst(rp);
        if (ri != NULL) {
                ri->rp = rp;
                ri->task = current;

                if (rp->entry_handler)
                        rp->entry_handler(ri, regs);

                arch_prepare_uretprobe(ri, regs);

                add_urp_inst(ri);
        } else {
                ++rp->nmissed;
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);

        return 0;
}

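/*
 * Register a return probe on a user-space function. rp->maxactive
 * instances are pre-allocated (a CPU-count based default is used when
 * the caller leaves it <= 0) and the function entry is hooked through
 * dbi_register_uprobe(). Returns 0 on success or a negative errno.
 *
 * Illustrative sketch of a caller (hypothetical names, not from this
 * file):
 *
 *      static int my_ret_handler(struct uretprobe_instance *ri,
 *                                struct pt_regs *regs)
 *      {
 *              return 0;       // inspect regs at function return
 *      }
 *
 *      struct uretprobe my_rp;
 *      memset(&my_rp, 0, sizeof(my_rp));
 *      my_rp.handler = my_ret_handler;
 *      my_rp.up.task = task;   // the traced task_struct
 *      my_rp.up.kp.addr = (kprobe_opcode_t *)func_addr;
 *      ret = dbi_register_uretprobe(&my_rp);
 */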
int dbi_register_uretprobe(struct uretprobe *rp)
{
        int i, ret = 0;
        struct uretprobe_instance *inst;

        DBPRINTF("START\n");

        rp->up.kp.pre_handler = pre_handler_uretprobe;
        rp->up.kp.post_handler = NULL;
        rp->up.kp.fault_handler = NULL;
        rp->up.kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#if 1 /* def CONFIG_PREEMPT */
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }

        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);

        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        return -ENOMEM;
                }

                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;

        /* Establish function entry probe point */
        ret = dbi_register_uprobe(&rp->up);
        if (ret)
                return ret;

        arch_opcode_analysis_uretprobe(rp);

        return 0;
}

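/*
 * For every pending return-probe instance that belongs to @parent,
 * restore the original return address in @task's memory (e.g. so a
 * child that inherited the parent's patched stack on fork does not
 * return into the trampoline).
 */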
int dbi_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
{
        unsigned long flags;
        struct uretprobe_instance *ri;
        struct hlist_head *head;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        spin_lock_irqsave(&uretprobe_lock, flags);

        head = uretprobe_inst_table_head(parent->mm);
        swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (parent == ri->task)
                        arch_disarm_urp_inst(ri, task);
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(dbi_disarm_urp_inst_for_task);

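/*
 * Drop all pending return-probe instances of @task: disarm each one in
 * the task's memory and put it back on its uretprobe's free list,
 * e.g. when the task is going away.
 */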
void dbi_discard_pending_uretprobes(struct task_struct *task)
{
        unsigned long flags;
        struct uretprobe_instance *ri;
        struct hlist_head *head;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        spin_lock_irqsave(&uretprobe_lock, flags);

        head = uretprobe_inst_table_head(task->mm);
        swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == task) {
                        printk("%s (%d/%d): pending urp inst: %08lx\n",
                               task->comm, task->tgid, task->pid,
                               (unsigned long)ri->rp->up.kp.addr);
                        arch_disarm_urp_inst(ri, task);
                        recycle_urp_inst(ri);
                }
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
}
EXPORT_SYMBOL_GPL(dbi_discard_pending_uretprobes);

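/*
 * Unregister a return probe: remove the entry probe, disarm and recycle
 * every still-pending instance, then free the pre-allocated instance
 * pool. @disarm is passed through to __dbi_unregister_uprobe().
 */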
void __dbi_unregister_uretprobe(struct uretprobe *rp, int disarm)
{
        unsigned long flags;
        struct uretprobe_instance *ri;

        __dbi_unregister_uprobe(&rp->up, disarm);
        spin_lock_irqsave(&uretprobe_lock, flags);

        while ((ri = get_used_urp_inst(rp)) != NULL) {
                if (arch_disarm_urp_inst(ri, ri->task) != 0)
                        printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
                               ri->task->comm, ri->task->tgid, ri->task->pid,
                               (unsigned long)rp->up.kp.addr);
                recycle_urp_inst(ri);
        }

        if (hlist_empty(&rp->used_instances)) {
                struct kprobe *p = &rp->up.kp;

                if (!hlist_unhashed(&p->is_hlist))
                        hlist_del_rcu(&p->is_hlist);
        }

        while ((ri = get_used_urp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        free_urp_inst(rp);
}
EXPORT_SYMBOL_GPL(__dbi_unregister_uretprobe);

void dbi_unregister_uretprobe(struct uretprobe *rp)
{
        __dbi_unregister_uretprobe(rp, 1);
}

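/*
 * Sweep the whole uprobe table and unregister every probe that belongs
 * to @task's thread group, e.g. when the traced process exits and its
 * probes must be torn down in bulk.
 */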
void dbi_unregister_all_uprobes(struct task_struct *task)
{
        struct hlist_head *head;
        struct kprobe *p;
        int i;
        struct hlist_node *tnode;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_table[i];
                swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
                        if (kp2up(p)->task->tgid == task->tgid) {
                                struct uprobe *up =
                                        container_of(p, struct uprobe, kp);
                                printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",
                                       p->addr, (unsigned long)p->opcode,
                                       task->comm, task->pid);
                                dbi_unregister_uprobe(up);
                        }
                }
        }
}

void swap_ujprobe_return(void)
{
        arch_ujprobe_return();
}
EXPORT_SYMBOL_GPL(swap_ujprobe_return);

static int __init init_uprobes(void)
{
        init_uprobe_table();
        init_uprobes_insn_slots();
        init_uretprobe_inst_table();

        return swap_arch_init_uprobes();
}

static void __exit exit_uprobes(void)
{
        swap_arch_exit_uprobes();
}

EXPORT_SYMBOL_GPL(dbi_register_ujprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_ujprobe);
EXPORT_SYMBOL_GPL(dbi_register_uretprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_uretprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_all_uprobes);

module_init(init_uprobes);
module_exit(exit_uprobes);

MODULE_LICENSE("GPL");