ARM: tizen_tm1_defconfig: Enable missing features related with CGROUPS
[profile/mobile/platform/kernel/linux-3.10-sc7730.git] / kernel / swap / uprobe / swap_uprobes.c
1 /**
2  * uprobe/swap_uprobes.c
3  * @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space Probes initial
4  * implementation; Support x86/ARM/MIPS for both user and kernel spaces.
5  * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for
6  * separating core and arch parts
7  *
8  * @section LICENSE
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  * @section COPYRIGHT
25  *
26  * Copyright (C) Samsung Electronics, 2006-2010
27  *
28  * @section DESCRIPTION
29  *
30  * Uprobes implementation.
31  */
32
33
34 #include <linux/hash.h>
35 #include <linux/mempolicy.h>
36 #include <linux/module.h>
37
38 #include <master/swap_initializer.h>
39 #include <kprobe/swap_slots.h>
40 #include <kprobe/swap_kdebug.h>
41 #include <kprobe/swap_kprobes_deps.h>
42
43 #include <swap-asm/swap_uprobes.h>
44
45 #include "swap_uprobes.h"
46
47
48 enum {
49         UPROBE_HASH_BITS  = 10,
50         UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
51 };
52
53 static DEFINE_RWLOCK(st_lock);
54 static struct hlist_head slot_table[UPROBE_TABLE_SIZE];
55 struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
56
57 DEFINE_SPINLOCK(uretprobe_lock);        /* Protects uretprobe_inst_table */
58 static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
59
60 #define DEBUG_PRINT_HASH_TABLE 0
61
#if DEBUG_PRINT_HASH_TABLE
/* Debug helper: dump every uprobe currently registered in uprobe_table. */
void print_uprobe_hash_table(void)
{
	int i;
	struct hlist_head *head;
	struct kprobe *p;
	DECLARE_NODE_PTR_FOR_HLIST(node);

	/* print uprobe table */
	for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
		/* FIX: the original referenced the non-existent
		 * 'uprobe_insn_slot_table' and a 'p->tgid' field; the
		 * registered probes live in 'uprobe_table', linked via
		 * 'hlist', and the tgid comes from the owning task. */
		head = &uprobe_table[i];
		swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
			printk(KERN_INFO "####### find U tgid=%u, addr=%p\n",
			       kp2up(p)->task->tgid, p->addr);
		}
	}
}
#endif
80
81
82 struct uinst_info *uinst_info_create(unsigned long vaddr,
83                                      kprobe_opcode_t opcode)
84 {
85         struct uinst_info *uinst;
86
87         uinst = kmalloc(sizeof(*uinst), GFP_ATOMIC);
88         if (uinst) {
89                 INIT_HLIST_NODE(&uinst->hlist);
90                 uinst->vaddr = vaddr;
91                 uinst->opcode = opcode;
92         } else {
93                 pr_err("Cannot allocate memory for uinst\n");
94         }
95
96         return uinst;
97 }
98 EXPORT_SYMBOL_GPL(uinst_info_create);
99
/**
 * @brief Frees a uinst_info descriptor created by uinst_info_create().
 *
 * @param uinst Descriptor to free (NULL is a no-op for kfree).
 * @return Void.
 */
void uinst_info_destroy(struct uinst_info *uinst)
{
	kfree(uinst);
}
EXPORT_SYMBOL_GPL(uinst_info_destroy);
105
106 void uinst_info_disarm(struct uinst_info *uinst, struct task_struct *task)
107 {
108         int ret = write_proc_vm_atomic(task, uinst->vaddr,
109                                        &uinst->opcode, sizeof(uinst->opcode));
110         if (!ret) {
111                 printk("uinst_info_disarm: failed to write memory "
112                        "tgid=%u, vaddr=%08lx!\n", task->tgid, uinst->vaddr);
113         }
114 }
115 EXPORT_SYMBOL_GPL(uinst_info_disarm);
116
117 /*
118  * Keep all fields in the uprobe consistent
119  */
120 static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
121 {
122         memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
123         memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
124 }
125
126 /*
127  * Aggregate handlers for multiple uprobes support - these handlers
128  * take care of invoking the individual uprobe handlers on p->list
129  */
130 static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
131 {
132         struct kprobe *kp;
133         int ret;
134
135         list_for_each_entry_rcu(kp, &p->list, list) {
136                 if (kp->pre_handler) {
137                         ret = kp->pre_handler(kp, regs);
138                         if (ret)
139                                 return ret;
140                 }
141         }
142
143         return 0;
144 }
145
146 static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs,
147                                unsigned long flags)
148 {
149         struct kprobe *kp;
150
151         list_for_each_entry_rcu(kp, &p->list, list) {
152                 if (kp->post_handler)
153                         kp->post_handler(kp, regs, flags);
154         }
155 }
156
/* Aggregate fault handler: faults are not forwarded to the individual
 * probes; always report "not handled". */
static int aggr_fault_uhandler(struct kprobe *p,
                               struct pt_regs *regs,
                               int trapnr)
{
        return 0;
}
163
/* Aggregate break handler: placeholder, breaks are not dispatched here. */
static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
{
        return 0;
}
168
169 /*
170  * Add the new probe to old_p->list. Fail if this is the
171  * second ujprobe at the address - two ujprobes can't coexist
172  */
173 static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
174 {
175         if (p->break_handler) {
176                 if (old_p->break_handler)
177                         return -EEXIST;
178
179                 list_add_tail_rcu(&p->list, &old_p->list);
180                 old_p->break_handler = aggr_break_uhandler;
181         } else {
182                 list_add_rcu(&p->list, &old_p->list);
183         }
184
185         if (p->post_handler && !old_p->post_handler)
186                 old_p->post_handler = aggr_post_uhandler;
187
188         return 0;
189 }
190
/*
 * Fill in the required fields of the "manager uprobe". Replace the
 * earlier uprobe in the hlist with the manager uprobe
 */
static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
{
        /* inherit the original opcode / arch state from p */
        copy_uprobe(p, ap);

        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_uhandler;
        ap->fault_handler = aggr_fault_uhandler;

        /* install aggregate dispatchers only for the handler kinds the
         * first probe actually uses */
        if (p->post_handler)
                ap->post_handler = aggr_post_uhandler;

        if (p->break_handler)
                ap->break_handler = aggr_break_uhandler;

        /* chain the original probe under the manager ... */
        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        /* ... then atomically swap the manager into the hash bucket in
         * p's place (readers see either p or ap, never neither) */
        hlist_replace_rcu(&p->hlist, &ap->hlist);
}
214
/*
 * This is the second or subsequent uprobe at the address - handle
 * the intricacies
 */
static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_uhandler) {
                /* old_p is already a manager probe: just append p */
                copy_uprobe(old_p, p);
                ret = add_new_uprobe(old_p, p);
        } else {
                /* first collision at this address: allocate a manager
                 * uprobe, demote old_p under it, then append p */
                struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
                if (!uap)
                        return -ENOMEM;

                uap->task = kp2up(p)->task;
                ap = up2kp(uap);
                add_aggr_uprobe(ap, old_p);
                copy_uprobe(ap, p);
                ret = add_new_uprobe(ap, p);
        }

        return ret;
}
241
/* Thin wrapper: arming (patching the target instruction) is fully
 * arch-specific. */
static int arm_uprobe(struct uprobe *p)
{
        return arch_arm_uprobe(p);
}
246
/**
 * @brief Disarms uprobe: restores the original instruction in the
 * target task. The actual patching is arch-specific.
 *
 * @param p Pointer to the uprobe's kprobe.
 * @param task Pointer to the target task.
 * @return Void.
 */
void disarm_uprobe(struct kprobe *p, struct task_struct *task)
{
        arch_disarm_uprobe(p, task);
}
EXPORT_SYMBOL_GPL(disarm_uprobe);
259
260 static void init_uprobes_insn_slots(void)
261 {
262         int i;
263         for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
264                 INIT_HLIST_HEAD(&slot_table[i]);
265 }
266
267 static void init_uprobe_table(void)
268 {
269         int i;
270         for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
271                 INIT_HLIST_HEAD(&uprobe_table[i]);
272 }
273
274 static void init_uretprobe_inst_table(void)
275 {
276         int i;
277         for (i = 0; i < UPROBE_TABLE_SIZE; ++i)
278                 INIT_HLIST_HEAD(&uretprobe_inst_table[i]);
279 }
280
281 /**
282  * @brief Gets uprobe's kprobe.
283  *
284  * @param addr Probe's address.
285  * @param tgid Probes's thread group ID.
286  * @return Pointer to the kprobe on success,\n
287  * NULL otherwise.
288  */
289 struct kprobe *get_ukprobe(void *addr, pid_t tgid)
290 {
291         struct hlist_head *head;
292         struct kprobe *p;
293         DECLARE_NODE_PTR_FOR_HLIST(node);
294
295         head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
296         swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
297                 if (p->addr == addr && kp2up(p)->task->tgid == tgid)
298                         return p;
299         }
300
301         return NULL;
302 }
303
304 /**
305  * @brief Adds uprobe to hlist when trampoline have been made.
306  *
307  * @param p Pointer to the uprobe's kprobe.
308  * @return Void.
309  */
310 void add_uprobe_table(struct kprobe *p)
311 {
312         write_lock(&st_lock);
313         hlist_add_head(&p->is_hlist,
314                        &slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
315         write_unlock(&st_lock);
316 }
317
318 static void del_uprobe_table(struct kprobe *p)
319 {
320         write_lock(&st_lock);
321         if (!hlist_unhashed(&p->is_hlist))
322                 hlist_del(&p->is_hlist);
323         write_unlock(&st_lock);
324 }
325
326 /**
327  * @brief Gets kprobe by insn slot.
328  *
329  * @param addr Probe's address.
330  * @param tgit Probe's thread group ID.
331  * @param regs Pointer to CPU registers data.
332  * @return Pointer to the kprobe on success,\n
333  * NULL otherwise.
334  */
335 struct kprobe *get_ukprobe_by_insn_slot(void *addr,
336                                         pid_t tgid,
337                                         struct pt_regs *regs)
338 {
339         struct hlist_head *head;
340         struct kprobe *p;
341         DECLARE_NODE_PTR_FOR_HLIST(node);
342
343         read_lock(&st_lock);
344         head = &slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
345         swap_hlist_for_each_entry(p, node, head, is_hlist) {
346                 if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
347                         read_unlock(&st_lock);
348                         return p;
349                 }
350         }
351         read_unlock(&st_lock);
352
353         return NULL;
354 }
355
356
/* Unlink @up from the slot table and release its arch resources
 * (e.g. the allocated instruction slot). */
static void remove_uprobe(struct uprobe *up)
{
        del_uprobe_table(&up->kp);
        arch_remove_uprobe(up);
}
362
/* Map an arbitrary hash key (here: the task's mm pointer) to its
 * uretprobe-instance bucket. */
static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
{
        return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}
367
/* Called with uretprobe_lock held.
 * Move @ri from its uretprobe's free list onto both the global
 * per-mm hash table and the uretprobe's used list. */
static void add_urp_inst(struct uretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del(&ri->uflist);

        /* Add rp inst onto table (hashed by the owner task's mm) */
        INIT_HLIST_NODE(&ri->hlist);
        hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE(&ri->uflist);
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}
385
386 /* Called with uretprobe_lock held */
387 static void recycle_urp_inst(struct uretprobe_instance *ri)
388 {
389         if (ri->rp) {
390                 hlist_del(&ri->hlist);
391                 /* remove rp inst off the used list */
392                 hlist_del(&ri->uflist);
393                 /* put rp inst back onto the free list */
394                 INIT_HLIST_NODE(&ri->uflist);
395                 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
396         }
397 }
398
399 /* Called with uretprobe_lock held */
400 static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
401 {
402         struct uretprobe_instance *ri;
403         DECLARE_NODE_PTR_FOR_HLIST(node);
404
405         swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
406                 return ri;
407         }
408
409         return NULL;
410 }
411
412 /**
413  * @brief Gets free uretprobe instanse for the specified uretprobe without
414  * allocation. Called with uretprobe_lock held.
415  *
416  * @param rp Pointer to the uretprobe.
417  * @return Pointer to the uretprobe_instance on success,\n
418  * NULL otherwise.
419  */
420 struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
421 {
422         struct uretprobe_instance *ri;
423         DECLARE_NODE_PTR_FOR_HLIST(node);
424
425         swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
426                 return ri;
427         }
428
429         return NULL;
430 }
431
432 /* Called with uretprobe_lock held */
433 static void free_urp_inst(struct uretprobe *rp)
434 {
435         struct uretprobe_instance *ri;
436         while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
437                 hlist_del(&ri->uflist);
438                 kfree(ri);
439         }
440 }
441
442 #define COMMON_URP_NR 10
443
444 static int alloc_nodes_uretprobe(struct uretprobe *rp)
445 {
446         int alloc_nodes;
447         struct uretprobe_instance *inst;
448         int i;
449
450 #if 1 /* def CONFIG_PREEMPT */
451         rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
452 #else
453         rp->maxacpptive += NR_CPUS;
454 #endif
455         alloc_nodes = COMMON_URP_NR;
456
457         for (i = 0; i < alloc_nodes; ++i) {
458                 inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_ATOMIC);
459                 if (inst == NULL) {
460                         free_urp_inst(rp);
461                         return -ENOMEM;
462                 }
463                 INIT_HLIST_NODE(&inst->uflist);
464                 hlist_add_head(&inst->uflist, &rp->free_instances);
465         }
466
467         return 0;
468 }
469
470 /* Called with uretprobe_lock held */
471 static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
472 {
473         struct uretprobe_instance *ri;
474         DECLARE_NODE_PTR_FOR_HLIST(node);
475
476         swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
477                 return ri;
478         }
479
480         if (!alloc_nodes_uretprobe(rp)) {
481                 swap_hlist_for_each_entry(ri, node,
482                                           &rp->free_instances, uflist) {
483                         return ri;
484                 }
485         }
486
487         return NULL;
488 }
489 /* =================================================================== */
490
491 /**
492  * @brief Registers uprobe.
493  *
494  * @param up Pointer to the uprobe to register.
495  * @return 0 on success,\n
496  * negative error code on error.
497  */
498 int swap_register_uprobe(struct uprobe *up)
499 {
500         int ret = 0;
501         struct kprobe *p, *old_p;
502
503         p = &up->kp;
504         if (!p->addr)
505                 return -EINVAL;
506
507         p->ainsn.insn = NULL;
508         p->mod_refcounted = 0;
509         p->nmissed = 0;
510         INIT_LIST_HEAD(&p->list);
511 #ifdef KPROBES_PROFILE
512         p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
513         p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
514         p->count = 0;
515 #endif
516
517         /* get the first item */
518         old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
519         if (old_p) {
520                 struct task_struct *task = up->task;
521
522                 /* TODO: add support many uprobes on address */
523                 printk(KERN_INFO "uprobe on task[%u %u %s] vaddr=%p is there\n",
524                        task->tgid, task->pid, task->comm, p->addr);
525                 ret = -EINVAL;
526                 goto out;
527
528                 ret = register_aggr_uprobe(old_p, p);
529                 DBPRINTF("goto out\n", ret);
530                 goto out;
531         }
532
533         INIT_HLIST_NODE(&p->is_hlist);
534
535         ret = arch_prepare_uprobe(up);
536         if (ret) {
537                 DBPRINTF("goto out\n", ret);
538                 goto out;
539         }
540
541         DBPRINTF("before out ret = 0x%x\n", ret);
542
543         /* TODO: add uprobe (must be in function) */
544         INIT_HLIST_NODE(&p->hlist);
545         hlist_add_head_rcu(&p->hlist,
546                            &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
547
548         ret = arm_uprobe(up);
549         if (ret) {
550                 hlist_del_rcu(&p->hlist);
551                 synchronize_rcu();
552                 remove_uprobe(up);
553         }
554
555 out:
556         DBPRINTF("out ret = 0x%x\n", ret);
557         return ret;
558 }
559 EXPORT_SYMBOL_GPL(swap_register_uprobe);
560
/**
 * @brief Unregisters uprobe.
 *
 * @param up Pointer to the uprobe.
 * @param disarm Disarm flag. When true uprobe is disarmed.
 * @return Void.
 */
void __swap_unregister_uprobe(struct uprobe *up, int disarm)
{
        struct kprobe *p, *old_p, *list_p;
        int cleanup_p;

        p = &up->kp;
        /* whatever is registered at this address is either our probe
         * itself or an aggregate "manager" probe holding it */
        old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
        if (unlikely(!old_p))
                return;

        if (p != old_p) {
                /* old_p is a manager: make sure p really hangs off its
                 * child list before touching anything */
                list_for_each_entry_rcu(list_p, &old_p->list, list) {
                        if (list_p == p) {
                                /* uprobe p is a valid probe */
                                goto valid_p;
                        }
                }

                return;
        }

valid_p:
        /* p is the only probe left when it is the head itself, or when
         * it is the manager's sole remaining child */
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
            (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
                /* Only probe on the hash list */
                if (disarm)
                        disarm_uprobe(&up->kp, up->task);

                hlist_del_rcu(&old_p->hlist);
                cleanup_p = 1;
        } else {
                /* other children remain: just unlink p from the chain */
                list_del_rcu(&p->list);
                cleanup_p = 0;
        }

        if (cleanup_p) {
                if (p != old_p) {
                        /* p was the manager's last child: dispose of the
                         * manager probe as well */
                        list_del_rcu(&p->list);
                        kfree(old_p);
                }

                /* wait for in-flight handlers unless we are atomic */
                if (!in_atomic())
                        synchronize_sched();

                remove_uprobe(up);
        } else {
                if (p->break_handler)
                        old_p->break_handler = NULL;

                if (p->post_handler) {
                        /* drop the aggregate post handler only when no
                         * remaining child still needs it */
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if (list_p->post_handler) {
                                        cleanup_p = 2;
                                        break;
                                }
                        }

                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
        }
}
EXPORT_SYMBOL_GPL(__swap_unregister_uprobe);
631
/**
 * @brief Unregisters uprobe. Main interface function, wrapper for
 * __swap_unregister_uprobe (always disarms the probe).
 *
 * @param up Pointer to the uprobe.
 * @return Void.
 */
void swap_unregister_uprobe(struct uprobe *up)
{
        __swap_unregister_uprobe(up, 1);
}
643
/**
 * @brief Registers ujprobe.
 *
 * @param jp Pointer to the ujprobe to register.
 * @return 0 on success,\n
 * error code on error.
 */
int swap_register_ujprobe(struct ujprobe *jp)
{
        int ret = 0;

        /* Todo: Verify probepoint is a function entry point */
        jp->up.kp.pre_handler = setjmp_upre_handler;
        jp->up.kp.break_handler = longjmp_break_uhandler;

        ret = swap_register_uprobe(&jp->up);

        return ret;
}
EXPORT_SYMBOL_GPL(swap_register_ujprobe);
664
/**
 * @brief Unregisters ujprobe.
 *
 * @param jp Pointer to the ujprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe.
 * @return Void.
 */
void __swap_unregister_ujprobe(struct ujprobe *jp, int disarm)
{
        __swap_unregister_uprobe(&jp->up, disarm);
}
EXPORT_SYMBOL_GPL(__swap_unregister_ujprobe);
677
/**
 * @brief Unregisters ujprobe. Main interface function, wrapper for
 * __swap_unregister_ujprobe (always disarms the probe).
 *
 * @param jp Pointer to the ujprobe.
 * @return Void.
 */
void swap_unregister_ujprobe(struct ujprobe *jp)
{
        __swap_unregister_ujprobe(jp, 1);
}
EXPORT_SYMBOL_GPL(swap_unregister_ujprobe);
690
/**
 * @brief Trampoline uprobe handler.
 *
 * Fires when a probed function returns through the uretprobe
 * trampoline: runs the user return handlers, recycles the matching
 * instance(s) and restores the original return address.
 *
 * @param p Pointer to the uprobe's kprobe.
 * @param regs Pointer to CPU register data.
 * @return 1
 */
int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct uretprobe_instance *ri = NULL;
        struct kprobe *kp;
        struct hlist_head *head;
        unsigned long flags, tramp_addr, orig_ret_addr = 0;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        tramp_addr = arch_get_trampoline_addr(p, regs);
        spin_lock_irqsave(&uretprobe_lock, flags);

        /* instances are hashed by the owning task's mm */
        head = uretprobe_inst_table_head(current->mm);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because an multiple functions in the call path
         * have a return probe installed on them, and/or more then one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       uretprobe_trampoline
         */
        swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current) {
                        /* another task is sharing our hash bucket */
                        continue;
                }

                kp = NULL;
                if (ri->rp) {
                        kp = up2kp(&ri->rp->up);

                        if (ri->rp->handler)
                                ri->rp->handler(ri, regs);
                }

                orig_ret_addr = (unsigned long)ri->ret_addr;
                recycle_urp_inst(ri);

                if ((orig_ret_addr != tramp_addr && kp == p) || kp == NULL) {
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
                }
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);
        /* orig_ret_addr is NULL when there is no need to restore anything
         * (all the magic is performed inside handler) */
        if (likely(orig_ret_addr))
                arch_set_orig_ret_addr(orig_ret_addr, regs);

        return 1;
}
760
/* Entry handler installed by swap_register_uretprobe(): grabs a free
 * instance, runs the user entry_handler, and plants the trampoline
 * return address via arch_prepare_uretprobe(). */
static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
{
        struct uprobe *up = container_of(p, struct uprobe, kp);
        struct uretprobe *rp = container_of(up, struct uretprobe, up);
#ifdef CONFIG_ARM
        /* per-mode "no return" flag set by arch_opcode_analysis_uretprobe */
        int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
#endif
        struct uretprobe_instance *ri;
        unsigned long flags;

#ifdef CONFIG_ARM
        if (noret)
                return 0;
#endif

        /* TODO: consider to only swap the
         * RA after the last pre_handler fired */
        spin_lock_irqsave(&uretprobe_lock, flags);

        /* TODO: test - remove retprobe after func entry but before its exit */
        ri = get_free_urp_inst(rp);
        if (ri != NULL) {
                int ret;

                ri->rp = rp;
                ri->task = current;
#ifdef CONFIG_ARM
                ri->preload_thumb = 0;
#endif

                if (rp->entry_handler)
                        rp->entry_handler(ri, regs);

                /* plant the trampoline; on failure undo and count a miss */
                ret = arch_prepare_uretprobe(ri, regs);
                add_urp_inst(ri);
                if (ret) {
                        recycle_urp_inst(ri);
                        ++rp->nmissed;
                }
        } else {
                /* no free instance could be obtained */
                ++rp->nmissed;
        }

        spin_unlock_irqrestore(&uretprobe_lock, flags);

        return 0;
}
808
809 /**
810  * @brief Registers uretprobe.
811  *
812  * @param rp Pointer to the uretprobe.
813  * @return 0 on success,\n
814  * negative error code on error.
815  */
816 int swap_register_uretprobe(struct uretprobe *rp)
817 {
818         int i, ret = 0;
819         struct uretprobe_instance *inst;
820
821         DBPRINTF("START\n");
822
823         rp->up.kp.pre_handler = pre_handler_uretprobe;
824         rp->up.kp.post_handler = NULL;
825         rp->up.kp.fault_handler = NULL;
826         rp->up.kp.break_handler = NULL;
827
828         /* Pre-allocate memory for max kretprobe instances */
829         if (rp->maxactive <= 0) {
830 #if 1 /* def CONFIG_PREEMPT */
831                 rp->maxactive = max(10, 2 * NR_CPUS);
832 #else
833                 rp->maxactive = NR_CPUS;
834 #endif
835         }
836
837         INIT_HLIST_HEAD(&rp->used_instances);
838         INIT_HLIST_HEAD(&rp->free_instances);
839
840         for (i = 0; i < rp->maxactive; i++) {
841                 inst = kmalloc(sizeof(*inst) + rp->data_size, GFP_KERNEL);
842                 if (inst == NULL) {
843                         free_urp_inst(rp);
844                         return -ENOMEM;
845                 }
846
847                 INIT_HLIST_NODE(&inst->uflist);
848                 hlist_add_head(&inst->uflist, &rp->free_instances);
849         }
850
851         rp->nmissed = 0;
852
853         /* Establish function entry probe point */
854         ret = swap_register_uprobe(&rp->up);
855         if (ret)
856                 return ret;
857
858         arch_opcode_analysis_uretprobe(rp);
859
860         return 0;
861 }
862 EXPORT_SYMBOL_GPL(swap_register_uretprobe);
863
/**
 * @brief Unregisters uretprobe.
 *
 * @param rp Pointer to the uretprobe.
 * @param disarm Disarm flag, passed to __swap_unregister_uprobe
 * @return Void.
 */
void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
{
        unsigned long flags;
        struct uretprobe_instance *ri;

        /* remove the entry probe first so no new instances appear */
        __swap_unregister_uprobe(&rp->up, disarm);

        spin_lock_irqsave(&uretprobe_lock, flags);
        while ((ri = get_used_urp_inst(rp)) != NULL) {
                bool is_current = ri->task == current;

                /* disarming our own frame: drop the spinlock around the
                 * arch call for the current task */
                if (is_current)
                        spin_unlock_irqrestore(&uretprobe_lock, flags);

                /* FIXME: arch_disarm_urp_inst() for no current context */
                if (arch_disarm_urp_inst(ri, ri->task, 0) != 0)
                        printk(KERN_INFO "%s (%d/%d): "
                               "cannot disarm urp instance (%08lx)\n",
                               ri->task->comm, ri->task->tgid, ri->task->pid,
                               (unsigned long)rp->up.kp.addr);

                if (is_current)
                        spin_lock_irqsave(&uretprobe_lock, flags);

                recycle_urp_inst(ri);
        }
        /* orphan anything still on the used list so free_urp_inst()
         * below cannot touch it via ri->rp */
        while ((ri = get_used_urp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
        spin_unlock_irqrestore(&uretprobe_lock, flags);

        free_urp_inst(rp);
}
EXPORT_SYMBOL_GPL(__swap_unregister_uretprobe);
906
/**
 * @brief Unregisters uretprobe. Main interface function, wrapper for
 * __swap_unregister_uretprobe (always disarms the probe).
 *
 * @param rp Pointer to the uretprobe.
 * @return Void.
 */
void swap_unregister_uretprobe(struct uretprobe *rp)
{
        __swap_unregister_uretprobe(rp, 1);
}
EXPORT_SYMBOL_GPL(swap_unregister_uretprobe);
919
/**
 * @brief Unregisters all uprobes for task's thread group ID.
 *
 * @param task Pointer to the task_struct
 * @return Void.
 */
void swap_unregister_all_uprobes(struct task_struct *task)
{
        struct hlist_head *head;
        struct kprobe *p;
        int i;
        struct hlist_node *tnode;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        /* walk every hash bucket; the _safe iterator allows the entry
         * to be removed by swap_unregister_uprobe() mid-walk */
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_table[i];
                swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
                        if (kp2up(p)->task->tgid == task->tgid) {
                                struct uprobe *up =
                                        container_of(p, struct uprobe, kp);
                                printk(KERN_INFO "%s: delete uprobe at %p[%lx]"
                                       " for %s/%d\n", __func__, p->addr,
                                       (unsigned long)p->opcode,
                                       task->comm, task->pid);
                                swap_unregister_uprobe(up);
                        }
                }
        }
}
EXPORT_SYMBOL_GPL(swap_unregister_all_uprobes);
950
/**
 * @brief Arch-independent wrapper for arch_ujprobe_return.
 *
 * @return Void.
 */
void swap_ujprobe_return(void)
{
        arch_ujprobe_return();
}
EXPORT_SYMBOL_GPL(swap_ujprobe_return);
961
962
963 static struct urinst_info *urinst_info_create(struct uretprobe_instance *ri)
964 {
965         struct urinst_info *urinst;
966
967         urinst = kmalloc(sizeof(*urinst), GFP_ATOMIC);
968         if (urinst) {
969                 INIT_HLIST_NODE(&urinst->hlist);
970                 urinst->task = ri->task;
971                 urinst->sp = (unsigned long)ri->sp;
972                 urinst->tramp = arch_tramp_by_ri(ri);
973                 urinst->ret_addr = (unsigned long)ri->ret_addr;
974         } else {
975                 pr_err("Cannot allocate memory for urinst\n");
976         }
977
978         return urinst;
979 }
980
/* Free a snapshot created by urinst_info_create(). */
static void urinst_info_destroy(struct urinst_info *urinst)
{
        kfree(urinst);
}
985
/* Rebuild a temporary uretprobe_instance from the snapshot and ask the
 * arch layer to restore the original return address in @task. */
static void urinst_info_disarm(struct urinst_info *urinst, struct task_struct *task)
{
        struct uretprobe_instance ri;
        unsigned long tramp = urinst->tramp;

        /* set necessary data*/
        ri.task = urinst->task;
        ri.sp = (kprobe_opcode_t *)urinst->sp;
        ri.ret_addr = (kprobe_opcode_t *)urinst->ret_addr;

        arch_disarm_urp_inst(&ri, task, tramp);
}
998
/**
 * @brief Collects urinst_info snapshots of all uretprobe instances
 * belonging to the current task into @head, preserving the hash-list
 * order.
 *
 * @param head List to fill with newly allocated urinst_info objects.
 * @param recycle When true, each visited instance is also returned to
 * its uretprobe's free list.
 * @return Void.
 */
void urinst_info_get_current_hlist(struct hlist_head *head, bool recycle)
{
        unsigned long flags;
        struct task_struct *task = current;
        struct uretprobe_instance *ri;
        struct hlist_head *hhead;
        struct hlist_node *n;
        struct hlist_node *last = NULL;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        spin_lock_irqsave(&uretprobe_lock, flags);
        hhead = uretprobe_inst_table_head(task->mm);
        swap_hlist_for_each_entry_safe(ri, node, n, hhead, hlist) {
                if (task == ri->task) {
                        struct urinst_info *urinst;

                        urinst = urinst_info_create(ri);
                        if (urinst) {
                                /* append after the previous snapshot to
                                 * keep the original iteration order */
                                if (last)
                                        hlist_add_after(last, &urinst->hlist);
                                else
                                        hlist_add_head(&urinst->hlist, head);

                                last = &urinst->hlist;
                        }

                        if (recycle)
                                recycle_urp_inst(ri);
                }
        }
        spin_unlock_irqrestore(&uretprobe_lock, flags);
}
EXPORT_SYMBOL_GPL(urinst_info_get_current_hlist);
1032
/**
 * @brief Drains a list built by urinst_info_get_current_hlist():
 * optionally disarms each snapshot in @task, then frees it.
 *
 * @param head List of urinst_info objects to consume.
 * @param task When non-NULL, each snapshot is disarmed in this task
 * before being destroyed.
 * @return Void.
 */
void urinst_info_put_current_hlist(struct hlist_head *head,
                                  struct task_struct *task)
{
        struct urinst_info *urinst;
        struct hlist_node *tmp;
        DECLARE_NODE_PTR_FOR_HLIST(node);

        swap_hlist_for_each_entry_safe(urinst, node, tmp, head, hlist) {
                /* check on disarm */
                if (task)
                        urinst_info_disarm(urinst, task);

                hlist_del(&urinst->hlist);
                urinst_info_destroy(urinst);
        }
}
EXPORT_SYMBOL_GPL(urinst_info_put_current_hlist);
1050
1051
/* One-time module initialization: reset all three hash tables.
 * Hooked up via SWAP_LIGHT_INIT_MODULE below. */
static int once(void)
{
        init_uprobe_table();
        init_uprobes_insn_slots();
        init_uretprobe_inst_table();

        return 0;
}
1060
1061 SWAP_LIGHT_INIT_MODULE(once, swap_arch_init_uprobes, swap_arch_exit_uprobes,
1062                        NULL, NULL);
1063
1064 MODULE_LICENSE("GPL");