/*
 * [IMPROVE] x86: apply jumper for US probes installing
 * kernel/swap-modules.git / uprobe / swap_uprobes.c
 */
1 /**
2  * uprobe/swap_uprobes.c
3  * @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space Probes initial
4  * implementation; Support x86/ARM/MIPS for both user and kernel spaces.
5  * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for
6  * separating core and arch parts
7  *
8  * @section LICENSE
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  * @section COPYRIGHT
25  *
26  * Copyright (C) Samsung Electronics, 2006-2010
27  *
28  * @section DESCRIPTION
29  *
30  * Uprobes implementation.
31  */
32
33
34 #include <linux/hash.h>
35 #include <linux/mempolicy.h>
36 #include <linux/module.h>
37
38 #include <kprobe/swap_slots.h>
39 #include <kprobe/swap_kdebug.h>
40 #include <kprobe/swap_kprobes_deps.h>
41
42 #include <swap-asm/swap_uprobes.h>
43
44 #include "swap_uprobes.h"
45
46
/* Hash-table sizing: 2^UPROBE_HASH_BITS buckets, shared by all tables below. */
enum {
	UPROBE_HASH_BITS  = 10,
	UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
};
51
/* Probes hashed by the address of their arch insn slot (see add_uprobe_table). */
struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
/* Probes hashed by probe address (see swap_register_uprobe / get_ukprobe). */
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];

DEFINE_SPINLOCK(uretprobe_lock);        /* Protects uretprobe_inst_table */
/* Active uretprobe instances hashed by the probed task's mm. */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
57
#define DEBUG_PRINT_HASH_TABLE 0

#if DEBUG_PRINT_HASH_TABLE
/*
 * Debug helper: dump every probe hashed into the insn slot table.
 * Compiled in only when DEBUG_PRINT_HASH_TABLE is non-zero.
 */
void print_uprobe_hash_table(void)
{
	int i;
	struct hlist_head *head;
	struct kprobe *p;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* walk the insn slot table (note: not uprobe_table) */
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
		head = &uprobe_insn_slot_table[i];
		swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
			/* fix: p->addr is a pointer, so use %p (the old %x
			 * truncates/mismatches on 64-bit) */
			printk("####### find U tgid=%u, addr=%p\n",
					p->tgid, p->addr);
		}
	}
}
#endif
78
79 /*
80  * Keep all fields in the uprobe consistent
81  */
82 static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
83 {
84         memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
85         memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
86 #ifdef CONFIG_ARM
87         p->safe_arm = old_p->safe_arm;
88         p->safe_thumb = old_p->safe_thumb;
89 #endif
90 }
91
92 /*
93  * Aggregate handlers for multiple uprobes support - these handlers
94  * take care of invoking the individual uprobe handlers on p->list
95  */
96 static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
97 {
98         struct kprobe *kp;
99         int ret;
100
101         list_for_each_entry_rcu(kp, &p->list, list) {
102                 if (kp->pre_handler) {
103                         ret = kp->pre_handler(kp, regs);
104                         if (ret) {
105                                 return ret;
106                         }
107                 }
108         }
109
110         return 0;
111 }
112
113 static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
114 {
115         struct kprobe *kp;
116
117         list_for_each_entry_rcu(kp, &p->list, list) {
118                 if (kp->post_handler) {
119                         kp->post_handler(kp, regs, flags);
120                 }
121         }
122 }
123
/* Aggregate fault handler: faults are not forwarded to chained probes. */
static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
	return 0;
}
128
/* Aggregate break handler: placeholder, no chained probe is invoked. */
static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
133
134 /*
135  * Add the new probe to old_p->list. Fail if this is the
136  * second ujprobe at the address - two ujprobes can't coexist
137  */
138 static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
139 {
140         if (p->break_handler) {
141                 if (old_p->break_handler) {
142                         return -EEXIST;
143                 }
144
145                 list_add_tail_rcu(&p->list, &old_p->list);
146                 old_p->break_handler = aggr_break_uhandler;
147         } else {
148                 list_add_rcu (&p->list, &old_p->list);
149         }
150
151         if (p->post_handler && !old_p->post_handler) {
152                 old_p->post_handler = aggr_post_uhandler;
153         }
154
155         return 0;
156 }
157
158 /*
159  * Fill in the required fields of the "manager uprobe". Replace the
160  * earlier uprobe in the hlist with the manager uprobe
161  */
162 static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
163 {
164         copy_uprobe(p, ap);
165
166         ap->addr = p->addr;
167         ap->pre_handler = aggr_pre_uhandler;
168         ap->fault_handler = aggr_fault_uhandler;
169
170         if (p->post_handler) {
171                 ap->post_handler = aggr_post_uhandler;
172         }
173
174         if (p->break_handler) {
175                 ap->break_handler = aggr_break_uhandler;
176         }
177
178         INIT_LIST_HEAD(&ap->list);
179         list_add_rcu(&p->list, &ap->list);
180
181         hlist_replace_rcu(&p->hlist, &ap->hlist);
182 }
183
184 /*
185  * This is the second or subsequent uprobe at the address - handle
186  * the intricacies
187  */
188 static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
189 {
190         int ret = 0;
191         struct kprobe *ap;
192
193         if (old_p->pre_handler == aggr_pre_uhandler) {
194                 copy_uprobe(old_p, p);
195                 ret = add_new_uprobe(old_p, p);
196         } else {
197                 struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
198                 if (!uap) {
199                         return -ENOMEM;
200                 }
201
202                 uap->task = kp2up(p)->task;
203                 ap = up2kp(uap);
204                 add_aggr_uprobe(ap, old_p);
205                 copy_uprobe(ap, p);
206                 ret = add_new_uprobe(ap, p);
207         }
208
209         return ret;
210 }
211
212 static void arm_uprobe(struct uprobe *p)
213 {
214         kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
215         int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
216                                        &insn, sizeof(insn));
217         if (!ret) {
218                 panic("arm_uprobe: failed to write memory "
219                       "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
220         }
221 }
222
223 /**
224  * @brief Disarms uprobe.
225  *
226  * @param p Pointer to the uprobe's kprobe.
227  * @param task Pointer to the target task.
228  * @return Void.
229  */
230 void disarm_uprobe(struct kprobe *p, struct task_struct *task)
231 {
232         int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
233                                        &p->opcode, sizeof(p->opcode));
234         if (!ret) {
235                 panic("disarm_uprobe: failed to write memory "
236                       "tgid=%u, addr=%p!\n", task->tgid, p->addr);
237         }
238 }
239 EXPORT_SYMBOL_GPL(disarm_uprobe);
240
241 static void init_uprobes_insn_slots(void)
242 {
243         int i;
244         for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
245                 INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
246         }
247 }
248
249 static void init_uprobe_table(void)
250 {
251         int i;
252         for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
253                 INIT_HLIST_HEAD(&uprobe_table[i]);
254         }
255 }
256
257 static void init_uretprobe_inst_table(void)
258 {
259         int i;
260         for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
261                 INIT_HLIST_HEAD (&uretprobe_inst_table[i]);
262         }
263 }
264
265 /**
266  * @brief Gets uprobe's kprobe.
267  *
268  * @param addr Probe's address.
269  * @param tgid Probes's thread group ID.
270  * @return Pointer to the kprobe on success,\n
271  * NULL otherwise.
272  */
273 struct kprobe *get_ukprobe(void *addr, pid_t tgid)
274 {
275         struct hlist_head *head;
276         struct kprobe *p;
277         DECLARE_NODE_PTR_FOR_HLIST(node);
278
279         head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
280         swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
281                 if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
282                         return p;
283                 }
284         }
285
286         return NULL;
287 }
288
289 /**
290  * @brief Adds uprobe to hlist when trampoline have been made.
291  *
292  * @param p Pointer to the uprobe's kprobe.
293  * @return Void.
294  */
295 void add_uprobe_table(struct kprobe *p)
296 {
297         INIT_HLIST_NODE(&p->is_hlist);
298         hlist_add_head_rcu(&p->is_hlist, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
299 }
300
301 /**
302  * @brief Gets kprobe by insn slot.
303  *
304  * @param addr Probe's address.
305  * @param tgit Probe's thread group ID.
306  * @param regs Pointer to CPU registers data.
307  * @return Pointer to the kprobe on success,\n
308  * NULL otherwise.
309  */
310 struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
311 {
312         struct hlist_head *head;
313         struct kprobe *p;
314         DECLARE_NODE_PTR_FOR_HLIST(node);
315
316         /* TODO: test - two processes invokes instrumented function */
317         head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
318         swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
319                 if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
320                         return p;
321                 }
322         }
323
324         return NULL;
325 }
326
327
/* Release arch-specific probe resources (insn slot etc.). */
static void remove_uprobe(struct uprobe *up)
{
	arch_remove_uprobe(up);
}
332
333 static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
334 {
335         return &uretprobe_inst_table[hash_ptr (hash_key, UPROBE_HASH_BITS)];
336 }
337
/* Called with uretprobe_lock held */
static void add_urp_inst(struct uretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table (keyed by the probed task's mm) */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}
355
356 /* Called with uretprobe_lock held */
357 static void recycle_urp_inst(struct uretprobe_instance *ri)
358 {
359         if (ri->rp) {
360                 hlist_del(&ri->hlist);
361                 /* remove rp inst off the used list */
362                 hlist_del(&ri->uflist);
363                 /* put rp inst back onto the free list */
364                 INIT_HLIST_NODE(&ri->uflist);
365                 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
366         }
367 }
368
369 /* Called with uretprobe_lock held */
370 static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
371 {
372         struct uretprobe_instance *ri;
373         DECLARE_NODE_PTR_FOR_HLIST(node);
374
375         swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
376                 return ri;
377         }
378
379         return NULL;
380 }
381
382 /**
383  * @brief Gets free uretprobe instanse for the specified uretprobe without
384  * allocation. Called with uretprobe_lock held.
385  *
386  * @param rp Pointer to the uretprobe.
387  * @return Pointer to the uretprobe_instance on success,\n
388  * NULL otherwise.
389  */
390 struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
391 {
392         struct uretprobe_instance *ri;
393         DECLARE_NODE_PTR_FOR_HLIST(node);
394
395         swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
396                 return ri;
397         }
398
399         return NULL;
400 }
401
402 /* Called with uretprobe_lock held */
403 static void free_urp_inst(struct uretprobe *rp)
404 {
405         struct uretprobe_instance *ri;
406         while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
407                 hlist_del(&ri->uflist);
408                 kfree(ri);
409         }
410 }
411
412 #define COMMON_URP_NR 10
413
414 static int alloc_nodes_uretprobe(struct uretprobe *rp)
415 {
416         int alloc_nodes;
417         struct uretprobe_instance *inst;
418         int i;
419
420 #if 1//def CONFIG_PREEMPT
421         rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
422 #else
423         rp->maxacpptive += NR_CPUS;
424 #endif
425         alloc_nodes = COMMON_URP_NR;
426
427         for (i = 0; i < alloc_nodes; ++i) {
428                 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
429                 if (inst == NULL) {
430                         free_urp_inst(rp);
431                         return -ENOMEM;
432                 }
433                 INIT_HLIST_NODE(&inst->uflist);
434                 hlist_add_head(&inst->uflist, &rp->free_instances);
435         }
436
437         return 0;
438 }
439
440 /* Called with uretprobe_lock held */
441 static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
442 {
443         struct uretprobe_instance *ri;
444         DECLARE_NODE_PTR_FOR_HLIST(node);
445
446         swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
447                 return ri;
448         }
449
450         if (!alloc_nodes_uretprobe(rp)) {
451                 swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
452                         return ri;
453                 }
454         }
455
456         return NULL;
457 }
458 // ===================================================================
459
460 /**
461  * @brief Registers uprobe.
462  *
463  * @param up Pointer to the uprobe to register.
464  * @return 0 on success,\n
465  * negative error code on error.
466  */
467 int swap_register_uprobe(struct uprobe *up)
468 {
469         int ret = 0;
470         struct kprobe *p, *old_p;
471
472         p = &up->kp;
473         if (!p->addr) {
474                 return -EINVAL;
475         }
476
477         DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
478
479 // thumb address = address-1;
480 #if defined(CONFIG_ARM)
481         // TODO: must be corrected in 'bundle'
482         if ((unsigned long) p->addr & 0x01) {
483                 p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
484         }
485 #endif
486
487         p->ainsn.insn = NULL;
488         p->mod_refcounted = 0;
489         p->nmissed = 0;
490         INIT_LIST_HEAD(&p->list);
491 #ifdef KPROBES_PROFILE
492         p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
493         p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
494         p->count = 0;
495 #endif
496
497         // get the first item
498         old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
499         if (old_p) {
500 #ifdef CONFIG_ARM
501                 p->safe_arm = old_p->safe_arm;
502                 p->safe_thumb = old_p->safe_thumb;
503 #endif
504                 ret = register_aggr_uprobe(old_p, p);
505                 DBPRINTF("goto out\n", ret);
506                 goto out;
507         }
508
509         ret = arch_prepare_uprobe(up);
510         if (ret) {
511                 DBPRINTF("goto out\n", ret);
512                 goto out;
513         }
514
515         DBPRINTF ("before out ret = 0x%x\n", ret);
516
517         // TODO: add uprobe (must be in function)
518         INIT_HLIST_NODE(&p->hlist);
519         hlist_add_head_rcu(&p->hlist, &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
520         arm_uprobe(up);
521
522 out:
523         DBPRINTF("out ret = 0x%x\n", ret);
524         return ret;
525 }
526
527 /**
528  * @brief Unregisters uprobe.
529  *
530  * @param up Pointer to the uprobe.
531  * @param disarm Disarm flag. When true uprobe is disarmed.
532  * @return Void.
533  */
534 void __swap_unregister_uprobe(struct uprobe *up, int disarm)
535 {
536         struct kprobe *p, *old_p, *list_p;
537         int cleanup_p;
538
539         p = &up->kp;
540         old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
541         if (unlikely(!old_p)) {
542                 return;
543         }
544
545         if (p != old_p) {
546                 list_for_each_entry_rcu(list_p, &old_p->list, list) {
547                         if (list_p == p) {
548                                 /* uprobe p is a valid probe */
549                                 goto valid_p;
550                         }
551                 }
552
553                 return;
554         }
555
556 valid_p:
557         if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
558             (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
559                 /* Only probe on the hash list */
560                 if (disarm)
561                         disarm_uprobe(&up->kp, up->task);
562
563                 hlist_del_rcu(&old_p->hlist);
564                 cleanup_p = 1;
565         } else {
566                 list_del_rcu(&p->list);
567                 cleanup_p = 0;
568         }
569
570         if (cleanup_p) {
571                 if (p != old_p) {
572                         list_del_rcu(&p->list);
573                         kfree(old_p);
574                 }
575
576                 if (!in_atomic()) {
577                         synchronize_sched();
578                 }
579
580                 remove_uprobe(up);
581         } else {
582                 if (p->break_handler) {
583                         old_p->break_handler = NULL;
584                 }
585
586                 if (p->post_handler) {
587                         list_for_each_entry_rcu (list_p, &old_p->list, list) {
588                                 if (list_p->post_handler) {
589                                         cleanup_p = 2;
590                                         break;
591                                 }
592                         }
593
594                         if (cleanup_p == 0) {
595                                 old_p->post_handler = NULL;
596                         }
597                 }
598         }
599 }
600 EXPORT_SYMBOL_GPL(__swap_unregister_uprobe);
601
602 /**
603  * @brief Unregisters uprobe. Main interface function, wrapper for
604  * __swap_unregister_uprobe.
605  *
606  * @param up Pointer to the uprobe.
607  * @return Void.
608  */
609 void swap_unregister_uprobe(struct uprobe *up)
610 {
611         __swap_unregister_uprobe(up, 1);
612 }
613
614 /**
615  * @brief Registers ujprobe.
616  *
617  * @param uj Pointer to the ujprobe function.
618  * @return 0 on success,\n
619  * error code on error.
620  */
621 int swap_register_ujprobe(struct ujprobe *jp)
622 {
623         int ret = 0;
624
625         /* Todo: Verify probepoint is a function entry point */
626         jp->up.kp.pre_handler = setjmp_upre_handler;
627         jp->up.kp.break_handler = longjmp_break_uhandler;
628
629         ret = swap_register_uprobe(&jp->up);
630
631         return ret;
632 }
633 EXPORT_SYMBOL_GPL(swap_register_ujprobe);
634
635 /**
636  * @brief Unregisters ujprobe.
637  *
638  * @param jp Pointer to the ujprobe.
639  * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
640  * @return Void.
641  */
642 void __swap_unregister_ujprobe(struct ujprobe *jp, int disarm)
643 {
644         __swap_unregister_uprobe(&jp->up, disarm);
645         /*
646          * Here is an attempt to unregister even those probes that have not been
647          * installed (hence not added to the hlist).
648          * So if we try to delete them from the hlist we will get NULL pointer
649          * dereference error. That is why we check whether this node
650          * really belongs to the hlist.
651          */
652         if (!(hlist_unhashed(&jp->up.kp.is_hlist))) {
653                 hlist_del_rcu(&jp->up.kp.is_hlist);
654         }
655 }
656 EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);
657
658 /**
659  * @brief Unregisters ujprobe. Main interface function, wrapper for
660  * __swap_unregister_ujprobe.
661  *
662  * @param jp Pointer to the jprobe.
663  * @return Void.
664  */
665 void swap_unregister_ujprobe(struct ujprobe *jp)
666 {
667         __swap_unregister_ujprobe(jp, 1);
668 }
669 EXPORT_SYMBOL_GPL(swap_unregister_ujprobe);
670
671 /**
672  * @brief Trampoline uprobe handler.
673  *
674  * @param p Pointer to the uprobe's kprobe.
675  * @param regs Pointer to CPU register data.
676  * @return 1
677  */
678 int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
679 {
680         struct uretprobe_instance *ri = NULL;
681         struct kprobe *kp;
682         struct hlist_head *head;
683         unsigned long flags, tramp_addr, orig_ret_addr = 0;
684         struct hlist_node *tmp;
685         DECLARE_NODE_PTR_FOR_HLIST(node);
686
687         tramp_addr = arch_get_trampoline_addr(p, regs);
688         spin_lock_irqsave(&uretprobe_lock, flags);
689
690         head = uretprobe_inst_table_head(current->mm);
691
692         /*
693          * It is possible to have multiple instances associated with a given
694          * task either because an multiple functions in the call path
695          * have a return probe installed on them, and/or more then one
696          * return probe was registered for a target function.
697          *
698          * We can handle this because:
699          *     - instances are always inserted at the head of the list
700          *     - when multiple return probes are registered for the same
701          *       function, the first instance's ret_addr will point to the
702          *       real return address, and all the rest will point to
703          *       uretprobe_trampoline
704          */
705         swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
706                 if (ri->task != current) {
707                         /* another task is sharing our hash bucket */
708                         continue;
709                 }
710
711                 kp = NULL;
712                 if (ri->rp) {
713                         kp = up2kp(&ri->rp->up);
714
715                         if (ri->rp->handler)
716                                 ri->rp->handler(ri, regs);
717                 }
718
719                 orig_ret_addr = (unsigned long)ri->ret_addr;
720                 recycle_urp_inst(ri);
721
722                 if ((orig_ret_addr != tramp_addr && kp == p) || kp == NULL) {
723                         /*
724                          * This is the real return address. Any other
725                          * instances associated with this task are for
726                          * other calls deeper on the call stack
727                          */
728                         break;
729                 }
730         }
731
732         spin_unlock_irqrestore(&uretprobe_lock, flags);
733         arch_set_orig_ret_addr(orig_ret_addr, regs);
734
735         return 1;
736 }
737
738 static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
739 {
740         struct uprobe *up = container_of(p, struct uprobe, kp);
741         struct uretprobe *rp = container_of(up, struct uretprobe, up);
742 #ifdef CONFIG_ARM
743         int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
744 #endif
745         struct uretprobe_instance *ri;
746         unsigned long flags;
747
748 #ifdef CONFIG_ARM
749         if (noret)
750                 return 0;
751 #endif
752
753         /* TODO: consider to only swap the RA after the last pre_handler fired */
754         spin_lock_irqsave(&uretprobe_lock, flags);
755
756         /* TODO: test - remove retprobe after func entry but before its exit */
757         if ((ri = get_free_urp_inst(rp)) != NULL) {
758                 ri->rp = rp;
759                 ri->task = current;
760
761                 if (rp->entry_handler)
762                         rp->entry_handler(ri, regs);
763
764                 arch_prepare_uretprobe(ri, regs);
765
766                 add_urp_inst(ri);
767         } else {
768                 ++rp->nmissed;
769         }
770
771         spin_unlock_irqrestore(&uretprobe_lock, flags);
772
773         return 0;
774 }
775
776 /**
777  * @brief Registers uretprobe.
778  *
779  * @param rp Pointer to the uretprobe.
780  * @return 0 on success,\n
781  * negative error code on error.
782  */
783 int swap_register_uretprobe(struct uretprobe *rp)
784 {
785         int i, ret = 0;
786         struct uretprobe_instance *inst;
787
788         DBPRINTF ("START\n");
789
790         rp->up.kp.pre_handler = pre_handler_uretprobe;
791         rp->up.kp.post_handler = NULL;
792         rp->up.kp.fault_handler = NULL;
793         rp->up.kp.break_handler = NULL;
794
795         /* Pre-allocate memory for max kretprobe instances */
796         if (rp->maxactive <= 0) {
797 #if 1//def CONFIG_PREEMPT
798                 rp->maxactive = max(10, 2 * NR_CPUS);
799 #else
800                 rp->maxactive = NR_CPUS;
801 #endif
802         }
803
804         INIT_HLIST_HEAD(&rp->used_instances);
805         INIT_HLIST_HEAD(&rp->free_instances);
806
807         for (i = 0; i < rp->maxactive; i++) {
808                 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
809                 if (inst == NULL) {
810                         free_urp_inst(rp);
811                         return -ENOMEM;
812                 }
813
814                 INIT_HLIST_NODE(&inst->uflist);
815                 hlist_add_head(&inst->uflist, &rp->free_instances);
816         }
817
818         rp->nmissed = 0;
819
820         /* Establish function entry probe point */
821         ret = swap_register_uprobe(&rp->up);
822         if (ret)
823                 return ret;
824
825         arch_opcode_analysis_uretprobe(rp);
826
827         return 0;
828 }
829 EXPORT_SYMBOL_GPL(swap_register_uretprobe);
830
831 /**
832  * @brief Disarms uretprobe instances for the specified child task.
833  *
834  * @param parent Pointer to the parent task struct.
835  * @param task Pointer to the child task struct.
836  * @return 0
837  */
838 int swap_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
839 {
840         unsigned long flags;
841         struct uretprobe_instance *ri;
842         struct hlist_head *head;
843         struct hlist_node *tmp;
844         DECLARE_NODE_PTR_FOR_HLIST(node);
845
846         spin_lock_irqsave(&uretprobe_lock, flags);
847
848         head = uretprobe_inst_table_head(parent->mm);
849         swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
850                 if (parent == ri->task) {
851                         arch_disarm_urp_inst(ri, task);
852                 }
853         }
854
855         spin_unlock_irqrestore(&uretprobe_lock, flags);
856
857         return 0;
858 }
859 EXPORT_SYMBOL_GPL(swap_disarm_urp_inst_for_task);
860
861 /**
862  * @brief Disarms uretprobes for specified task.
863  *
864  * @param task Pointer to the task_struct.
865  * @return Void.
866  */
867 void swap_discard_pending_uretprobes(struct task_struct *task)
868 {
869         unsigned long flags;
870         struct uretprobe_instance *ri;
871         struct hlist_head *head;
872         struct hlist_node *tmp;
873         DECLARE_NODE_PTR_FOR_HLIST(node);
874
875         spin_lock_irqsave(&uretprobe_lock, flags);
876
877         head = uretprobe_inst_table_head(task->mm);
878         swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
879                 if (ri->task == task) {
880                         printk("%s (%d/%d): pending urp inst: %08lx\n",
881                                task->comm, task->tgid, task->pid,
882                                (unsigned long)ri->rp->up.kp.addr);
883                         arch_disarm_urp_inst(ri, task);
884                         recycle_urp_inst(ri);
885                 }
886         }
887
888         spin_unlock_irqrestore(&uretprobe_lock, flags);
889 }
890 EXPORT_SYMBOL_GPL(swap_discard_pending_uretprobes);
891
892 /**
893  * @brief Unregisters uretprobe.
894  *
895  * @param rp Pointer to the ureprobe.
896  * @param disarm Disarm flag, passed to __swap_unregister_uprobe
897  * @return Void.
898  */
899 void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
900 {
901         unsigned long flags;
902         struct uretprobe_instance *ri;
903
904         __swap_unregister_uprobe(&rp->up, disarm);
905         spin_lock_irqsave (&uretprobe_lock, flags);
906
907         while ((ri = get_used_urp_inst(rp)) != NULL) {
908                 if (arch_disarm_urp_inst(ri, ri->task) != 0)
909                         printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
910                                         ri->task->comm, ri->task->tgid, ri->task->pid,
911                                         (unsigned long)rp->up.kp.addr);
912                 recycle_urp_inst(ri);
913         }
914
915         if (hlist_empty(&rp->used_instances)) {
916                 struct kprobe *p = &rp->up.kp;
917
918                 if (!(hlist_unhashed(&p->is_hlist))) {
919                         hlist_del_rcu(&p->is_hlist);
920                 }
921         }
922
923         while ((ri = get_used_urp_inst(rp)) != NULL) {
924                 ri->rp = NULL;
925                 hlist_del(&ri->uflist);
926         }
927
928         spin_unlock_irqrestore(&uretprobe_lock, flags);
929         free_urp_inst(rp);
930 }
931 EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);
932
933 /**
934  * @brief Unregistets uretprobe. Main interface function, wrapper for
935  * __swap_unregister_uretprobe.
936  *
937  * @param rp Pointer to the uretprobe.
938  * @return Void.
939  */
940 void swap_unregister_uretprobe(struct uretprobe *rp)
941 {
942         __swap_unregister_uretprobe(rp, 1);
943 }
944 EXPORT_SYMBOL_GPL(swap_unregister_uretprobe);
945
946 /**
947  * @brief Unregisters all uprobes for task's thread group ID.
948  *
949  * @param task Pointer to the task_struct
950  * @return Void.
951  */
952 void swap_unregister_all_uprobes(struct task_struct *task)
953 {
954         struct hlist_head *head;
955         struct kprobe *p;
956         int i;
957         struct hlist_node *tnode;
958         DECLARE_NODE_PTR_FOR_HLIST(node);
959
960         for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
961                 head = &uprobe_table[i];
962                 swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
963                         if (kp2up(p)->task->tgid == task->tgid) {
964                                 struct uprobe *up = container_of(p, struct uprobe, kp);
965                                 printk("%s: delete uprobe at %p[%lx] for "
966                                        "%s/%d\n", __func__, p->addr,
967                                        (unsigned long)p->opcode,
968                                        task->comm, task->pid);
969                                 swap_unregister_uprobe(up);
970                         }
971                 }
972         }
973 }
974 EXPORT_SYMBOL_GPL(swap_unregister_all_uprobes);
975
976 /**
977  * @brief Arch-independent wrapper for arch_ujprobe_return.
978  *
979  * @return Void.
980  */
981 void swap_ujprobe_return(void)
982 {
983         arch_ujprobe_return();
984 }
985 EXPORT_SYMBOL_GPL(swap_ujprobe_return);
986
/* Module init: set up all hash tables, then arch-specific state. */
static int __init init_uprobes(void)
{
	init_uprobe_table();
	init_uprobes_insn_slots();
	init_uretprobe_inst_table();

	return swap_arch_init_uprobes();
}
995
/* Module exit: tear down arch-specific uprobe state. */
static void __exit exit_uprobes(void)
{
	swap_arch_exit_uprobes();
}
1000
1001 module_init(init_uprobes);
1002 module_exit(exit_uprobes);
1003
1004 MODULE_LICENSE ("GPL");