/**
 * uprobe/swap_uprobes.c
 * @author Alexey Gerenkov <a.gerenkov@samsung.com>: initial implementation of
 * user-space probes; x86/ARM/MIPS support for both user and kernel space.
 * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign of the
 * module to separate the core and arch parts
 *
 * @section LICENSE
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * @section COPYRIGHT
 *
 * Copyright (C) Samsung Electronics, 2006-2010
 *
 * @section DESCRIPTION
 *
 * Uprobes implementation.
 */


#include <linux/hash.h>
#include <linux/mempolicy.h>
#include <linux/module.h>

#include <master/swap_initializer.h>
#include <kprobe/swap_slots.h>
#include <kprobe/swap_kdebug.h>
#include <kprobe/swap_kprobes_deps.h>

#include <swap-asm/swap_uprobes.h>

#include "swap_uprobes.h"


enum {
        UPROBE_HASH_BITS  = 10,
        UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
};

static DEFINE_RWLOCK(st_lock);
static struct hlist_head slot_table[UPROBE_TABLE_SIZE];
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];

DEFINE_SPINLOCK(uretprobe_lock);        /* Protects uretprobe_inst_table */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
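
/*
 * All three tables are indexed with hash_ptr(). For orientation, the
 * bucket holding the probes registered at a given address is found as in
 * this sketch (equivalent to the lookups used throughout this file):
 *
 *      struct hlist_head *head =
 *              &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
 */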

#define DEBUG_PRINT_HASH_TABLE 0

#if DEBUG_PRINT_HASH_TABLE
void print_uprobe_hash_table(void)
{
        int i;
        struct hlist_head *head;
        struct uprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* print the insn slot table (chained via is_hlist) */
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &slot_table[i];
                swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
                        printk(KERN_INFO "####### find U tgid=%u, addr=0x%lx\n",
                                        p->task->tgid, (unsigned long)p->addr);
                }
        }
}
#endif


struct uinst_info *uinst_info_create(unsigned long vaddr,
                                     kprobe_opcode_t opcode)
{
        struct uinst_info *uinst;

        uinst = kmalloc(sizeof(*uinst), GFP_ATOMIC);
        if (uinst) {
                INIT_HLIST_NODE(&uinst->hlist);
                uinst->vaddr = vaddr;
                uinst->opcode = opcode;
        } else {
                pr_err("Cannot allocate memory for uinst\n");
        }

        return uinst;
}
EXPORT_SYMBOL_GPL(uinst_info_create);

void uinst_info_destroy(struct uinst_info *uinst)
{
        kfree(uinst);
}
EXPORT_SYMBOL_GPL(uinst_info_destroy);

void uinst_info_disarm(struct uinst_info *uinst, struct task_struct *task)
{
        int ret = write_proc_vm_atomic(task, uinst->vaddr,
                                       &uinst->opcode, sizeof(uinst->opcode));
        if (!ret) {
                printk(KERN_ERR "uinst_info_disarm: failed to write memory "
                       "tgid=%u, vaddr=%08lx!\n", task->tgid, uinst->vaddr);
        }
}
EXPORT_SYMBOL_GPL(uinst_info_disarm);
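
/*
 * Typical lifecycle of a uinst_info: it snapshots the original opcode at
 * vaddr so the breakpoint can later be removed by writing that opcode back.
 * A minimal sketch follows (the caller and its arguments are hypothetical,
 * hence the #if 0; only the helpers defined above are used):
 */
#if 0
static void uinst_lifecycle_example(struct task_struct *task,
                                    unsigned long vaddr,
                                    kprobe_opcode_t orig_opcode)
{
        struct uinst_info *uinst;

        uinst = uinst_info_create(vaddr, orig_opcode);
        if (!uinst)
                return;

        uinst_info_disarm(uinst, task); /* write the original opcode back */
        uinst_info_destroy(uinst);
}
#endif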

/*
 * Keep all fields in the uprobe consistent
 */
static inline void copy_uprobe(struct uprobe *old_p, struct uprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(uprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_insn));
}

/*
 * Aggregate handlers for multiple uprobes support - these handlers
 * take care of invoking the individual uprobe handlers on p->list
 */
static int aggr_pre_uhandler(struct uprobe *p, struct pt_regs *regs)
{
        struct uprobe *up;
        int ret;

        list_for_each_entry_rcu(up, &p->list, list) {
                if (up->pre_handler) {
                        ret = up->pre_handler(up, regs);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static void aggr_post_uhandler(struct uprobe *p, struct pt_regs *regs,
                               unsigned long flags)
{
        struct uprobe *up;

        list_for_each_entry_rcu(up, &p->list, list) {
                if (up->post_handler)
                        up->post_handler(up, regs, flags);
        }
}

static int aggr_fault_uhandler(struct uprobe *p,
                               struct pt_regs *regs,
                               int trapnr)
{
        return 0;
}

static int aggr_break_uhandler(struct uprobe *p, struct pt_regs *regs)
{
        return 0;
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second ujprobe at the address - two ujprobes can't coexist
 */
static int add_new_uprobe(struct uprobe *old_p, struct uprobe *p)
{
        if (p->break_handler) {
                if (old_p->break_handler)
                        return -EEXIST;

                list_add_tail_rcu(&p->list, &old_p->list);
                old_p->break_handler = aggr_break_uhandler;
        } else {
                list_add_rcu(&p->list, &old_p->list);
        }

        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_uhandler;

        return 0;
}

/*
 * Fill in the required fields of the "manager uprobe". Replace the
 * earlier uprobe in the hlist with the manager uprobe
 */
static inline void add_aggr_uprobe(struct uprobe *ap, struct uprobe *p)
{
        copy_uprobe(p, ap);

        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_uhandler;
        ap->fault_handler = aggr_fault_uhandler;

        if (p->post_handler)
                ap->post_handler = aggr_post_uhandler;

        if (p->break_handler)
                ap->break_handler = aggr_break_uhandler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent uprobe at the address - handle
 * the intricacies
 */
static int register_aggr_uprobe(struct uprobe *old_p, struct uprobe *p)
{
        int ret = 0;

        if (old_p->pre_handler == aggr_pre_uhandler) {
                copy_uprobe(old_p, p);
                ret = add_new_uprobe(old_p, p);
        } else {
                struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
                if (!uap)
                        return -ENOMEM;

                uap->task = p->task;
                add_aggr_uprobe(uap, old_p);
                copy_uprobe(uap, p);
                ret = add_new_uprobe(uap, p);
        }

        return ret;
}

static int arm_uprobe(struct uprobe *p)
{
        return arch_arm_uprobe(p);
}

/**
 * @brief Disarms uprobe.
 *
 * @param p Pointer to the uprobe.
 * @param task Pointer to the target task.
 * @return Void.
 */
void disarm_uprobe(struct uprobe *p, struct task_struct *task)
{
        arch_disarm_uprobe(p, task);
}
EXPORT_SYMBOL_GPL(disarm_uprobe);

static void init_uprobes_insn_slots(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
                INIT_HLIST_HEAD(&slot_table[i]);
}

static void init_uprobe_table(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
                INIT_HLIST_HEAD(&uprobe_table[i]);
}

static void init_uretprobe_inst_table(void)
{
        int i;
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
                INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
}

/**
 * @brief Gets uprobe.
 *
 * @param addr Probe's address.
 * @param tgid Probe's thread group ID.
 * @return Pointer to the uprobe on success,\n
 * NULL otherwise.
 */
struct uprobe *get_uprobe(void *addr, pid_t tgid)
{
        struct hlist_head *head;
        struct uprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr && p->task->tgid == tgid)
                        return p;
        }

        return NULL;
}

/**
 * @brief Adds the uprobe to the slot-table hlist once its trampoline has
 * been made.
 *
 * @param p Pointer to the uprobe.
 * @return Void.
 */
void add_uprobe_table(struct uprobe *p)
{
        write_lock(&st_lock);
        hlist_add_head(&p->is_hlist,
                       &slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
        write_unlock(&st_lock);
}

static void del_uprobe_table(struct uprobe *p)
{
        write_lock(&st_lock);
        if (!hlist_unhashed(&p->is_hlist))
                hlist_del(&p->is_hlist);
        write_unlock(&st_lock);
}

/**
 * @brief Gets uprobe by insn slot.
 *
 * @param addr Probe's address.
 * @param tgid Probe's thread group ID.
 * @param regs Pointer to CPU registers data.
 * @return Pointer to the uprobe on success,\n
 * NULL otherwise.
 */
struct uprobe *get_uprobe_by_insn_slot(void *addr,
                                       pid_t tgid,
                                       struct pt_regs *regs)
{
        struct hlist_head *head;
        struct uprobe *p;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        read_lock(&st_lock);
        head = &slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
        swap_hlist_for_each_entry(p, node, head, is_hlist) {
                if (p->ainsn.insn == addr && p->task->tgid == tgid) {
                        read_unlock(&st_lock);
                        return p;
                }
        }
        read_unlock(&st_lock);

        return NULL;
}


static void remove_uprobe(struct uprobe *up)
{
        del_uprobe_table(up);
        arch_remove_uprobe(up);
}

static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
{
        return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}

/* Called with uretprobe_lock held */
static void add_urp_inst(struct uretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del(&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE(&ri->hlist);
        hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE(&ri->uflist);
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with uretprobe_lock held */
static void recycle_urp_inst(struct uretprobe_instance *ri)
{
        if (ri->rp) {
                hlist_del(&ri->hlist);
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        }
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
                return ri;
        }

        return NULL;
}

/**
 * @brief Gets a free uretprobe instance for the specified uretprobe without
 * allocation. Called with uretprobe_lock held.
 *
 * @param rp Pointer to the uretprobe.
 * @return Pointer to the uretprobe_instance on success,\n
 * NULL otherwise.
 */
struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        return NULL;
}

/* Called with uretprobe_lock held */
static void free_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

#define COMMON_URP_NR 10

static int alloc_nodes_uretprobe(struct uretprobe *rp)
{
        int alloc_nodes;
        struct uretprobe_instance *inst;
        int i;

#if 1 /* def CONFIG_PREEMPT */
        rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
#else
        rp->maxactive += NR_CPUS;
#endif
        alloc_nodes = COMMON_URP_NR;

        for (i = 0; i < alloc_nodes; ++i) {
                inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        return 0;
}

/* Called with uretprobe_lock held */
static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
{
        struct uretprobe_instance *ri;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }

        if (!alloc_nodes_uretprobe(rp)) {
                swap_hlist_for_each_entry(ri, node,
                                          &rp->free_instances, uflist) {
                        return ri;
                }
        }

        return NULL;
}
/* =================================================================== */

/**
 * @brief Registers uprobe.
 *
 * @param p Pointer to the uprobe to register.
 * @return 0 on success,\n
 * negative error code on error.
 */
int swap_register_uprobe(struct uprobe *p)
{
        int ret = 0;
        struct uprobe *old_p;

        if (!p->addr)
                return -EINVAL;

        p->ainsn.insn = NULL;
        INIT_LIST_HEAD(&p->list);
#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif

        /* get the first item */
        old_p = get_uprobe(p->addr, p->task->tgid);
        if (old_p) {
                struct task_struct *task = p->task;

                /*
                 * TODO: add support for multiple uprobes at one address;
                 * until then, fail here instead of calling
                 * register_aggr_uprobe(old_p, p)
                 */
                printk(KERN_INFO "uprobe on task[%u %u %s] vaddr=%p is there\n",
                       task->tgid, task->pid, task->comm, p->addr);
                ret = -EINVAL;
                goto out;
        }

        INIT_HLIST_NODE(&p->is_hlist);

        ret = arch_prepare_uprobe(p);
        if (ret) {
                DBPRINTF("goto out, ret = 0x%x\n", ret);
                goto out;
        }

        DBPRINTF("before out ret = 0x%x\n", ret);

        /* TODO: add uprobe (must be in function) */
        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                           &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);

        ret = arm_uprobe(p);
        if (ret) {
                hlist_del_rcu(&p->hlist);
                synchronize_rcu();
                remove_uprobe(p);
        }

out:
        DBPRINTF("out ret = 0x%x\n", ret);
        return ret;
}
EXPORT_SYMBOL_GPL(swap_register_uprobe);
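
/*
 * A minimal registration sketch (the caller and its handler are
 * hypothetical, hence the #if 0). Only the fields this file itself reads
 * are filled in: task, addr and the optional handlers.
 */
#if 0
static int example_pre_handler(struct uprobe *p, struct pt_regs *regs)
{
        /* called when the probe fires; 0 lets execution continue */
        return 0;
}

static int example_register(struct task_struct *task, void *vaddr)
{
        static struct uprobe up;

        up.task = task;                 /* target process */
        up.addr = vaddr;                /* probed virtual address */
        up.pre_handler = example_pre_handler;

        return swap_register_uprobe(&up);
}
#endif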

/**
 * @brief Unregisters uprobe.
 *
 * @param p Pointer to the uprobe.
 * @param disarm Disarm flag. When set, the uprobe is disarmed.
 * @return Void.
 */
void __swap_unregister_uprobe(struct uprobe *p, int disarm)
{
        struct uprobe *old_p, *list_p;
        int cleanup_p;

        /* we MUST handle probes for processes that were never created */
        if (!p->task)
                return;

        old_p = get_uprobe(p->addr, p->task->tgid);
        if (unlikely(!old_p))
                return;

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list) {
                        if (list_p == p) {
                                /* uprobe p is a valid probe */
                                goto valid_p;
                        }
                }

                return;
        }

valid_p:
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
            (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
                /* Only probe on the hash list */
                if (disarm)
                        disarm_uprobe(p, p->task);

                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        if (cleanup_p) {
                if (p != old_p) {
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }

                if (!in_atomic())
                        synchronize_sched();

                remove_uprobe(p);
        } else {
                if (p->break_handler)
                        old_p->break_handler = NULL;

                if (p->post_handler) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }

                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
        }
}
EXPORT_SYMBOL_GPL(__swap_unregister_uprobe);

/**
 * @brief Unregisters uprobe. Main interface function, wrapper for
 * __swap_unregister_uprobe.
 *
 * @param up Pointer to the uprobe.
 * @return Void.
 */
void swap_unregister_uprobe(struct uprobe *up)
{
        __swap_unregister_uprobe(up, 1);
}

/**
 * @brief Registers ujprobe.
 *
 * @param jp Pointer to the ujprobe.
 * @return 0 on success,\n
 * error code on error.
 */
int swap_register_ujprobe(struct ujprobe *jp)
{
        int ret = 0;

        /* Todo: Verify probepoint is a function entry point */
        jp->up.pre_handler = setjmp_upre_handler;
        jp->up.break_handler = longjmp_break_uhandler;

        ret = swap_register_uprobe(&jp->up);

        return ret;
}
EXPORT_SYMBOL_GPL(swap_register_ujprobe);

/**
 * @brief Unregisters ujprobe.
 *
 * @param jp Pointer to the ujprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
 * @return Void.
 */
void __swap_unregister_ujprobe(struct ujprobe *jp, int disarm)
{
        __swap_unregister_uprobe(&jp->up, disarm);
}
EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);

/**
 * @brief Unregisters ujprobe. Main interface function, wrapper for
 * __swap_unregister_ujprobe.
 *
 * @param jp Pointer to the ujprobe.
 * @return Void.
 */
void swap_unregister_ujprobe(struct ujprobe *jp)
{
        __swap_unregister_ujprobe(jp, 1);
}
EXPORT_SYMBOL_GPL(swap_unregister_ujprobe);
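
/*
 * Registration sketch for a ujprobe (hypothetical caller, hence #if 0).
 * Only the embedded uprobe needs task/addr; the pre- and break-handlers
 * are filled in by swap_register_ujprobe() itself.
 */
#if 0
static int example_register_ujprobe(struct task_struct *task, void *entry)
{
        static struct ujprobe jp;

        jp.up.task = task;
        jp.up.addr = entry;     /* probed function entry point */

        return swap_register_ujprobe(&jp);
}
#endif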

/**
 * @brief Trampoline uprobe handler.
 *
 * @param p Pointer to the uprobe.
 * @param regs Pointer to CPU register data.
 * @return 1
 */
int trampoline_uprobe_handler(struct uprobe *p, struct pt_regs *regs)
{
        struct uretprobe_instance *ri = NULL;
        struct uprobe *up;
        struct hlist_head *head;
        unsigned long flags, tramp_addr, orig_ret_addr = 0;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        tramp_addr = arch_get_trampoline_addr(p, regs);
        spin_lock_irqsave(&uretprobe_lock, flags);

        head = uretprobe_inst_table_head(current->mm);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       uretprobe_trampoline
         */
        swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current) {
                        /* another task is sharing our hash bucket */
                        continue;
                }

                up = NULL;
                if (ri->rp) {
                        up = &ri->rp->up;

                        if (ri->rp->handler)
                                ri->rp->handler(ri, regs);
                }

                orig_ret_addr = (unsigned long)ri->ret_addr;
                recycle_urp_inst(ri);

                if ((orig_ret_addr != tramp_addr && up == p) || up == NULL) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
                }
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        /* orig_ret_addr is 0 when there is no need to restore anything
         * (all the magic is performed inside the handler) */
        if (likely(orig_ret_addr))
                arch_set_orig_ret_addr(orig_ret_addr, regs);

        return 1;
}

static int pre_handler_uretprobe(struct uprobe *p, struct pt_regs *regs)
{
        struct uretprobe *rp = container_of(p, struct uretprobe, up);
#ifdef CONFIG_ARM
        int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
#endif
        struct uretprobe_instance *ri;
        unsigned long flags;

#ifdef CONFIG_ARM
        if (noret)
                return 0;
#endif

        /* TODO: consider swapping the RA only after
         * the last pre_handler has fired */
        spin_lock_irqsave(&uretprobe_lock, flags);

        /* TODO: test - remove retprobe after func entry but before its exit */
        ri = get_free_urp_inst(rp);
        if (ri != NULL) {
                int ret;

                ri->rp = rp;
                ri->task = current;
#ifdef CONFIG_ARM
                ri->preload_thumb = 0;
#endif

                if (rp->entry_handler)
                        rp->entry_handler(ri, regs);

                ret = arch_prepare_uretprobe(ri, regs);
                add_urp_inst(ri);
                if (ret) {
                        recycle_urp_inst(ri);
                        ++rp->nmissed;
                }
        } else {
                ++rp->nmissed;
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);

        return 0;
}

/**
 * @brief Registers uretprobe.
 *
 * @param rp Pointer to the uretprobe.
 * @return 0 on success,\n
 * negative error code on error.
 */
int swap_register_uretprobe(struct uretprobe *rp)
{
        int i, ret = 0;
        struct uretprobe_instance *inst;

        DBPRINTF("START\n");

        rp->up.pre_handler = pre_handler_uretprobe;
        rp->up.post_handler = NULL;
        rp->up.fault_handler = NULL;
        rp->up.break_handler = NULL;

        /* Pre-allocate memory for max uretprobe instances */
        if (rp->maxactive <= 0) {
#if 1 /* def CONFIG_PREEMPT */
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }

        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);

        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_KERNEL);
                if (inst == NULL) {
                        free_urp_inst(rp);
                        return -ENOMEM;
                }

                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;

        /* Establish function entry probe point */
        ret = swap_register_uprobe(&rp->up);
        if (ret)
                return ret;

        arch_opcode_analysis_uretprobe(rp);

        return 0;
}
EXPORT_SYMBOL_GPL(swap_register_uretprobe);
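
/*
 * Registration sketch for a uretprobe (hypothetical caller, hence #if 0).
 * maxactive <= 0 lets swap_register_uretprobe() pick a default, and
 * entry_handler/handler are optional (they are NULL-checked above).
 */
#if 0
static int example_register_uretprobe(struct task_struct *task, void *entry)
{
        static struct uretprobe rp;

        rp.up.task = task;
        rp.up.addr = entry;     /* probed function entry point */
        rp.maxactive = 0;       /* use the default number of instances */
        rp.data_size = 0;       /* no per-instance payload */

        return swap_register_uretprobe(&rp);
}
#endif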

/**
 * @brief Unregisters uretprobe.
 *
 * @param rp Pointer to the uretprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
 * @return Void.
 */
void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
{
        unsigned long flags;
        struct uretprobe_instance *ri;

        __swap_unregister_uprobe(&rp->up, disarm);

        spin_lock_irqsave(&uretprobe_lock, flags);
        while ((ri = get_used_urp_inst(rp)) != NULL) {
                bool is_current = ri->task == current;

                if (is_current)
                        spin_unlock_irqrestore(&uretprobe_lock, flags);

                /* FIXME: arch_disarm_urp_inst() for no current context */
                if (arch_disarm_urp_inst(ri, ri->task, 0) != 0)
                        printk(KERN_INFO "%s (%d/%d): "
                               "cannot disarm urp instance (%08lx)\n",
                               ri->task->comm, ri->task->tgid, ri->task->pid,
                               (unsigned long)rp->up.addr);

                if (is_current)
                        spin_lock_irqsave(&uretprobe_lock, flags);

                recycle_urp_inst(ri);
        }

        while ((ri = get_used_urp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
        spin_unlock_irqrestore(&uretprobe_lock, flags);

        free_urp_inst(rp);
}
EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);

/**
 * @brief Unregisters uretprobe. Main interface function, wrapper for
 * __swap_unregister_uretprobe.
 *
 * @param rp Pointer to the uretprobe.
 * @return Void.
 */
void swap_unregister_uretprobe(struct uretprobe *rp)
{
        __swap_unregister_uretprobe(rp, 1);
}
EXPORT_SYMBOL_GPL(swap_unregister_uretprobe);

/**
 * @brief Unregisters all uprobes for the task's thread group ID.
 *
 * @param task Pointer to the task_struct.
 * @return Void.
 */
void swap_unregister_all_uprobes(struct task_struct *task)
{
        struct hlist_head *head;
        struct uprobe *p;
        int i;
        struct hlist_node *tnode;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_table[i];
                swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
                        if (p->task->tgid == task->tgid) {
                                printk(KERN_INFO "%s: delete uprobe at %p[%lx]"
                                       " for %s/%d\n", __func__, p->addr,
                                       (unsigned long)p->opcode,
                                       task->comm, task->pid);
                                swap_unregister_uprobe(p);
                        }
                }
        }
}
EXPORT_SYMBOL_GPL(swap_unregister_all_uprobes);

/**
 * @brief Arch-independent wrapper for arch_ujprobe_return.
 *
 * @return Void.
 */
void swap_ujprobe_return(void)
{
        arch_ujprobe_return();
}
EXPORT_SYMBOL_GPL(swap_ujprobe_return);


static struct urinst_info *urinst_info_create(struct uretprobe_instance *ri)
{
        struct urinst_info *urinst;

        urinst = kmalloc(sizeof(*urinst), GFP_ATOMIC);
        if (urinst) {
                INIT_HLIST_NODE(&urinst->hlist);
                urinst->task = ri->task;
                urinst->sp = (unsigned long)ri->sp;
                urinst->tramp = arch_tramp_by_ri(ri);
                urinst->ret_addr = (unsigned long)ri->ret_addr;
        } else {
                pr_err("Cannot allocate memory for urinst\n");
        }

        return urinst;
}

static void urinst_info_destroy(struct urinst_info *urinst)
{
        kfree(urinst);
}

static void urinst_info_disarm(struct urinst_info *urinst,
                               struct task_struct *task)
{
        struct uretprobe_instance ri;
        unsigned long tramp = urinst->tramp;

        /* set necessary data */
        ri.task = urinst->task;
        ri.sp = (kprobe_opcode_t *)urinst->sp;
        ri.ret_addr = (kprobe_opcode_t *)urinst->ret_addr;

        arch_disarm_urp_inst(&ri, task, tramp);
}

void urinst_info_get_current_hlist(struct hlist_head *head, bool recycle)
{
        unsigned long flags;
        struct task_struct *task = current;
        struct uretprobe_instance *ri;
        struct hlist_head *hhead;
        struct hlist_node *n;
        struct hlist_node *last = NULL;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        spin_lock_irqsave(&uretprobe_lock, flags);
        hhead = uretprobe_inst_table_head(task->mm);
        swap_hlist_for_each_entry_safe(ri, node, n, hhead, hlist) {
                if (task == ri->task) {
                        struct urinst_info *urinst;

                        urinst = urinst_info_create(ri);
                        if (urinst) {
                                if (last)
                                        hlist_add_after(last, &urinst->hlist);
                                else
                                        hlist_add_head(&urinst->hlist, head);

                                last = &urinst->hlist;
                        }

                        if (recycle)
                                recycle_urp_inst(ri);
                }
        }
        spin_unlock_irqrestore(&uretprobe_lock, flags);
}
EXPORT_SYMBOL_GPL(urinst_info_get_current_hlist);

void urinst_info_put_current_hlist(struct hlist_head *head,
                                   struct task_struct *task)
{
        struct urinst_info *urinst;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry_safe(urinst, node, tmp, head, hlist) {
                /* disarm only when a target task is given */
                if (task)
                        urinst_info_disarm(urinst, task);

                hlist_del(&urinst->hlist);
                urinst_info_destroy(urinst);
        }
}
EXPORT_SYMBOL_GPL(urinst_info_put_current_hlist);
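
/*
 * Pairing sketch for the two helpers above (hypothetical caller, hence
 * #if 0): snapshot current's return-probe instances into a private list,
 * then restore the original return addresses and free the snapshots.
 */
#if 0
static void example_urinst_pair(struct task_struct *task)
{
        HLIST_HEAD(head);

        urinst_info_get_current_hlist(&head, true);     /* collect + recycle */
        urinst_info_put_current_hlist(&head, task);     /* disarm + free */
}
#endif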


static int once(void)
{
        init_uprobe_table();
        init_uprobes_insn_slots();
        init_uretprobe_inst_table();

        return 0;
}

SWAP_LIGHT_INIT_MODULE(once, swap_arch_init_uprobes, swap_arch_exit_uprobes,
                       NULL, NULL);

MODULE_LICENSE("GPL");