[FIX] correct allocation of uretprobe_instance struct
[kernel/swap-modules.git] uprobe/swap_uprobes.c
/*
 *  Dynamic Binary Instrumentation Module based on KProbes
 *  modules/uprobe/swap_uprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2006-2010
 *
 * 2008-2009    Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
 *              Probes initial implementation; support for x86/ARM/MIPS in both user and kernel space.
 * 2010         Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign of the module to separate core and arch parts
 *
 */


#include "swap_uprobes.h"
#include <kprobe/dbi_kdebug.h>

#include <uprobe/arch/asm/swap_uprobes.h>

#include <linux/hash.h>
#include <linux/mempolicy.h>
#include <linux/module.h>
#include <kprobe/dbi_insn_slots.h>
#include <kprobe/dbi_kprobes_deps.h>

enum {
        UPROBE_HASH_BITS  = 10,
        UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
};
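
/*
 * Probes are distributed over the tables below by hashing an address
 * with hash_ptr() from <linux/hash.h>. For example (this mirrors how
 * get_ukprobe() further down selects its bucket):
 *
 *      struct hlist_head *head;
 *      head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
 */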

struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];

DEFINE_SPINLOCK(uretprobe_lock);        /* Protects uretprobe_inst_table */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];

#define DEBUG_PRINT_HASH_TABLE 0

#if DEBUG_PRINT_HASH_TABLE
void print_kprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* print the kprobe table */
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kprobe_table[i];
                swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find K tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}

void print_kretprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* print the kretprobe instance table */
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kretprobe_inst_table[i];
                swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find KR tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}

void print_uprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* print the uprobe insn slot table */
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_insn_slot_table[i];
                swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find U tgid=%u, addr=%p\n",
                                        p->tgid, p->addr);
                }
        }
}
#endif

/*
 * Keep all fields in the uprobe consistent
 */
static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
        p->ss_addr = old_p->ss_addr;
#ifdef CONFIG_ARM
        p->safe_arm = old_p->safe_arm;
        p->safe_thumb = old_p->safe_thumb;
#endif
}

/*
 * Aggregate handlers for multiple uprobes support - these handlers
 * take care of invoking the individual uprobe handlers on p->list
 */
static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;
        int ret;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        ret = kp->pre_handler(kp, regs);
                        if (ret) {
                                return ret;
                        }
                }
        }

        return 0;
}

static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        kp->post_handler(kp, regs, flags);
                }
        }
}

static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
        return 0;
}

static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        return 0;
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second ujprobe at the address - two ujprobes can't coexist
 */
static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler) {
                        return -EEXIST;
                }

                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_uhandler;
        } else {
                list_add_rcu(&p->list, &old_p->list);
        }

        if (p->post_handler && !old_p->post_handler) {
                old_p->post_handler = aggr_post_uhandler;
        }

        return 0;
}

/*
 * Fill in the required fields of the "manager uprobe". Replace the
 * earlier uprobe in the hlist with the manager uprobe
 */
static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_uprobe(p, ap);

        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_uhandler;
        ap->fault_handler = aggr_fault_uhandler;

        if (p->post_handler) {
                ap->post_handler = aggr_post_uhandler;
        }

        if (p->break_handler) {
                ap->break_handler = aggr_break_uhandler;
        }

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent uprobe at the address - handle
 * the intricacies
 */
static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_uhandler) {
                copy_uprobe(old_p, p);
                ret = add_new_uprobe(old_p, p);
        } else {
                struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
                if (!uap) {
                        return -ENOMEM;
                }

                uap->task = kp2up(p)->task;
                ap = up2kp(uap);
                add_aggr_uprobe(ap, old_p);
                copy_uprobe(ap, p);
                ret = add_new_uprobe(ap, p);
        }

        return ret;
}
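
/*
 * After register_aggr_uprobe() the hlist entry at the probed address is
 * the manager uprobe; the individual probes (including the original one)
 * sit on its ->list and are invoked in turn by aggr_pre_uhandler() and
 * aggr_post_uhandler().
 */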

static void arm_uprobe(struct uprobe *p)
{
        kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
        int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
                                       &insn, sizeof(insn));
        if (!ret) {
                panic("arm_uprobe: failed to write memory "
                      "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
        }
}

void disarm_uprobe(struct kprobe *p, struct task_struct *task)
{
        int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
                                       &p->opcode, sizeof(p->opcode));
        if (!ret) {
                panic("disarm_uprobe: failed to write memory "
                      "tgid=%u, addr=%p!\n", task->tgid, p->addr);
        }
}
EXPORT_SYMBOL_GPL(disarm_uprobe);

static void init_uprobes_insn_slots(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
        }
}

static void init_uprobe_table(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                INIT_HLIST_HEAD(&uprobe_table[i]);
        }
}

static void init_uretprobe_inst_table(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
        }
}

struct kprobe *get_ukprobe(void *addr, pid_t tgid)
{
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
        }

        return NULL;
}
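
/*
 * Example (a sketch, not code from this module): an arch-level breakpoint
 * handler would resolve the probe that fired from the trap address and
 * the current task, along the lines of
 *
 *      struct kprobe *p = get_ukprobe((void *)addr, current->tgid);
 *
 * where addr is the faulting instruction address taken from pt_regs.
 */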

void add_uprobe_table(struct kprobe *p)
{
        INIT_HLIST_NODE(&p->is_hlist);
        hlist_add_head_rcu(&p->is_hlist,
                           &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn,
                                                            UPROBE_HASH_BITS)]);
}

struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
{
        struct hlist_head *head;
        struct kprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
                if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
        }

        return NULL;
}

static void remove_uprobe(struct uprobe *up)
{
        struct kprobe *p = up2kp(up);

        free_insn_slot(up->sm, p->ainsn.insn);
}

static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
{
        return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}

/* Called with uretprobe_lock held */
static void add_urp_inst(struct uretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del(&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE(&ri->hlist);
        hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE(&ri->uflist);
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with uretprobe_lock held */
static void recycle_urp_inst(struct uretprobe_instance *ri)
{
        if (ri->rp) {
                hlist_del(&ri->hlist);
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        }
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
                return ri;
        }

        return NULL;
}

/* Called with uretprobe_lock held */
struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        return NULL;
}

/* Called with uretprobe_lock held */
static void free_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

#define COMMON_URP_NR 10

static int alloc_nodes_uretprobe(struct uretprobe *rp)
{
        int alloc_nodes;
        struct uretprobe_instance *inst;
        int i;

#if 1 /* def CONFIG_PREEMPT */
        rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
#else
        rp->maxactive += NR_CPUS;
#endif
        alloc_nodes = COMMON_URP_NR;

        for (i = 0; i < alloc_nodes; ++i) {
                inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        return 0;
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        if (!alloc_nodes_uretprobe(rp)) {
                swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                        return ri;
                }
        }

        return NULL;
}
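
/*
 * Instance pool lifecycle, as implemented by the helpers above: a free
 * uretprobe_instance is taken in pre_handler_uretprobe() via
 * get_free_urp_inst() (which grows the pool on demand), moved to the
 * used list by add_urp_inst(), and put back on the free list by
 * recycle_urp_inst() when the probed function returns or the probe is
 * unregistered; free_urp_inst() finally kfree()s whatever remains on
 * the free list.
 */
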
/* =================================================================== */

int dbi_register_uprobe(struct uprobe *up)
{
        int ret = 0;
        struct kprobe *p, *old_p;

        p = &up->kp;
        if (!p->addr) {
                return -EINVAL;
        }

        DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);

        /* Thumb address = address - 1 */
#if defined(CONFIG_ARM)
        /* TODO: must be corrected in 'bundle' */
        if ((unsigned long)p->addr & 0x01) {
                p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
        }
#endif

        p->ainsn.insn = NULL;
        p->mod_refcounted = 0;
        p->nmissed = 0;
        INIT_LIST_HEAD(&p->list);
#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif

        /* get the first item */
        old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
        if (old_p) {
#ifdef CONFIG_ARM
                p->safe_arm = old_p->safe_arm;
                p->safe_thumb = old_p->safe_thumb;
#endif
                ret = register_aggr_uprobe(old_p, p);
                DBPRINTF("goto out, ret = %d\n", ret);
                goto out;
        }

        ret = arch_prepare_uprobe(up);
        if (ret) {
                DBPRINTF("goto out, ret = %d\n", ret);
                goto out;
        }

        DBPRINTF("before out ret = 0x%x\n", ret);

        /* TODO: add uprobe (must be in function) */
        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                           &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
        arm_uprobe(up);

out:
        DBPRINTF("out ret = 0x%x\n", ret);
        return ret;
}
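
/*
 * Example usage (a minimal sketch from a hypothetical client module;
 * my_pre_handler, target_task and target_addr are illustrative names):
 *
 *      static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *      {
 *              return 0;
 *      }
 *
 *      static struct uprobe up;
 *
 *      up.task = target_task;
 *      up.kp.addr = (kprobe_opcode_t *)target_addr;
 *      up.kp.pre_handler = my_pre_handler;
 *      if (dbi_register_uprobe(&up))
 *              printk("uprobe registration failed\n");
 *
 * Returning 0 from the pre-handler lets the probed instruction execute
 * normally; dbi_unregister_uprobe(&up) removes the probe again.
 */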

void __dbi_unregister_uprobe(struct uprobe *up, int disarm)
{
        struct kprobe *p, *old_p, *list_p;
        int cleanup_p;

        p = &up->kp;
        old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
        if (unlikely(!old_p)) {
                return;
        }

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list) {
                        if (list_p == p) {
                                /* uprobe p is a valid probe */
                                goto valid_p;
                        }
                }

                return;
        }

valid_p:
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
            (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
                /* Only probe on the hash list */
                if (disarm)
                        disarm_uprobe(&up->kp, up->task);

                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        if (cleanup_p) {
                if (p != old_p) {
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }

                if (!in_atomic()) {
                        synchronize_sched();
                }

                remove_uprobe(up);
        } else {
                if (p->break_handler) {
                        old_p->break_handler = NULL;
                }

                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }

                        if (cleanup_p == 0) {
                                old_p->post_handler = NULL;
                        }
                }
        }
}
EXPORT_SYMBOL_GPL(__dbi_unregister_uprobe);

void dbi_unregister_uprobe(struct uprobe *up)
{
        __dbi_unregister_uprobe(up, 1);
}

int dbi_register_ujprobe(struct ujprobe *jp)
{
        int ret = 0;

        /* TODO: verify that the probe point is a function entry point */
        jp->up.kp.pre_handler = setjmp_upre_handler;
        jp->up.kp.break_handler = longjmp_break_uhandler;

        ret = dbi_register_uprobe(&jp->up);

        return ret;
}
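
/*
 * Example usage (a sketch; it assumes the jprobe-style convention that
 * struct ujprobe carries an entry callback invoked with the probed
 * function's own arguments - my_entry, target_task and target_addr are
 * illustrative names):
 *
 *      static void my_entry(int fd, size_t count)
 *      {
 *              swap_ujprobe_return();
 *      }
 *
 *      static struct ujprobe jp;
 *
 *      jp.entry = (kprobe_opcode_t *)my_entry;
 *      jp.up.task = target_task;
 *      jp.up.kp.addr = (kprobe_opcode_t *)target_addr;
 *      dbi_register_ujprobe(&jp);
 *
 * The entry callback must hand control back with swap_ujprobe_return()
 * (defined at the bottom of this file).
 */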

void __dbi_unregister_ujprobe(struct ujprobe *jp, int disarm)
{
        __dbi_unregister_uprobe(&jp->up, disarm);
        /*
         * We may be asked to unregister probes that were never actually
         * installed (and hence never added to the hlist). Deleting such a
         * node from the hlist would dereference a NULL pointer, so check
         * whether this node really belongs to the hlist first.
         */
        if (!hlist_unhashed(&jp->up.kp.is_hlist)) {
                hlist_del_rcu(&jp->up.kp.is_hlist);
        }
}
EXPORT_SYMBOL_GPL(__dbi_unregister_ujprobe);

void dbi_unregister_ujprobe(struct ujprobe *jp)
{
        __dbi_unregister_ujprobe(jp, 1);
}

int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct uretprobe_instance *ri = NULL;
        struct hlist_head *head;
        unsigned long flags, tramp_addr, orig_ret_addr = 0;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        tramp_addr = arch_get_trampoline_addr(p, regs);
        spin_lock_irqsave(&uretprobe_lock, flags);

        head = uretprobe_inst_table_head(current->mm);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       uretprobe_trampoline
         */
        swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current) {
                        /* another task is sharing our hash bucket */
                        continue;
                }

                if (ri->rp && ri->rp->handler) {
                        ri->rp->handler(ri, regs);
                }

                orig_ret_addr = (unsigned long)ri->ret_addr;
                recycle_urp_inst(ri);

                if (orig_ret_addr != tramp_addr) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
                }
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        arch_set_orig_ret_addr(orig_ret_addr, regs);

        return 1;
}

static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
{
        struct uprobe *up = container_of(p, struct uprobe, kp);
        struct uretprobe *rp = container_of(up, struct uretprobe, up);
#ifdef CONFIG_ARM
        int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
#endif
        struct uretprobe_instance *ri;
        unsigned long flags;

#ifdef CONFIG_ARM
        if (noret)
                return 0;
#endif

        /* TODO: consider swapping the RA only after the last pre_handler has fired */
        spin_lock_irqsave(&uretprobe_lock, flags);

        /* TODO: test - remove retprobe after func entry but before its exit */
        if ((ri = get_free_urp_inst(rp)) != NULL) {
                ri->rp = rp;
                ri->task = current;

                if (rp->entry_handler)
                        rp->entry_handler(ri, regs);

                arch_prepare_uretprobe(ri, regs);

                add_urp_inst(ri);
        } else {
                ++rp->nmissed;
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);

        return 0;
}

int dbi_register_uretprobe(struct uretprobe *rp)
{
        int i, ret = 0;
        struct uretprobe_instance *inst;

        DBPRINTF("START\n");

        rp->up.kp.pre_handler = pre_handler_uretprobe;
        rp->up.kp.post_handler = NULL;
        rp->up.kp.fault_handler = NULL;
        rp->up.kp.break_handler = NULL;

        /* Pre-allocate memory for the maximum number of uretprobe instances */
        if (rp->maxactive <= 0) {
#if 1 /* def CONFIG_PREEMPT */
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }

        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);

        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        return -ENOMEM;
                }

                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;

        /* Establish function entry probe point */
        ret = dbi_register_uprobe(&rp->up);
        if (ret)
                return ret;

        arch_opcode_analysis_uretprobe(rp);

        return 0;
}
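
/*
 * Example usage (a minimal sketch; my_ret_handler, target_task and
 * target_addr are illustrative names):
 *
 *      static int my_ret_handler(struct uretprobe_instance *ri,
 *                                struct pt_regs *regs)
 *      {
 *              return 0;
 *      }
 *
 *      static struct uretprobe rp;
 *
 *      rp.handler = my_ret_handler;
 *      rp.maxactive = 0;
 *      rp.up.task = target_task;
 *      rp.up.kp.addr = (kprobe_opcode_t *)target_addr;
 *      dbi_register_uretprobe(&rp);
 *
 * The handler runs from trampoline_uprobe_handler() on function return,
 * with ri->ret_addr holding the original return address; leaving
 * maxactive at 0 lets the code above size the instance pool itself.
 */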

/* Called with uretprobe_lock held */
int dbi_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
{
        struct uretprobe_instance *ri;
        struct hlist_head *head = uretprobe_inst_table_head(parent->mm);
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (parent == ri->task) {
                        arch_disarm_urp_inst(ri, task);
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dbi_disarm_urp_inst_for_task);

void __dbi_unregister_uretprobe(struct uretprobe *rp, int disarm)
{
        unsigned long flags;
        struct uretprobe_instance *ri;

        __dbi_unregister_uprobe(&rp->up, disarm);
        spin_lock_irqsave(&uretprobe_lock, flags);

        while ((ri = get_used_urp_inst(rp)) != NULL) {
                if (arch_disarm_urp_inst(ri, ri->task) != 0)
                        printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
                                        ri->task->comm, ri->task->tgid, ri->task->pid,
                                        (unsigned long)rp->up.kp.addr);
                recycle_urp_inst(ri);
        }

        if (hlist_empty(&rp->used_instances)) {
                struct kprobe *p = &rp->up.kp;

                if (!hlist_unhashed(&p->is_hlist)) {
                        hlist_del_rcu(&p->is_hlist);
                }
        }

        while ((ri = get_used_urp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        free_urp_inst(rp);
}
EXPORT_SYMBOL_GPL(__dbi_unregister_uretprobe);

void dbi_unregister_uretprobe(struct uretprobe *rp)
{
        __dbi_unregister_uretprobe(rp, 1);
}

void dbi_unregister_all_uprobes(struct task_struct *task)
{
        struct hlist_head *head;
        struct kprobe *p;
        int i;
        struct hlist_node *tnode;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_table[i];
                swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
                        if (kp2up(p)->task->tgid == task->tgid) {
                                struct uprobe *up = container_of(p, struct uprobe, kp);
                                printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",
                                                p->addr, (unsigned long)p->opcode,
                                                task->comm, task->pid);
                                dbi_unregister_uprobe(up);
                        }
                }
        }
}

void swap_ujprobe_return(void)
{
        arch_ujprobe_return();
}
EXPORT_SYMBOL_GPL(swap_ujprobe_return);

static int __init init_uprobes(void)
{
        init_uprobe_table();
        init_uprobes_insn_slots();
        init_uretprobe_inst_table();

        return swap_arch_init_uprobes();
}

static void __exit exit_uprobes(void)
{
        swap_arch_exit_uprobes();
}

EXPORT_SYMBOL_GPL(dbi_register_ujprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_ujprobe);
EXPORT_SYMBOL_GPL(dbi_register_uretprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_uretprobe);
EXPORT_SYMBOL_GPL(dbi_unregister_all_uprobes);

module_init(init_uprobes);
module_exit(exit_uprobes);

MODULE_LICENSE("GPL");