2 * Dynamic Binary Instrumentation Module based on KProbes
3 * modules/uprobe/swap_uprobes.h
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Copyright (C) Samsung Electronics, 2006-2010
21 * 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
22 * Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
23 * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
28 #include "swap_uprobes.h"
29 #include <kprobe/dbi_kdebug.h>
31 #include <uprobe/arch/asm/swap_uprobes.h>
33 #include <linux/hash.h>
34 #include <linux/mempolicy.h>
35 #include <linux/module.h>
36 #include <kprobe/swap_slots.h>
37 #include <kprobe/dbi_kprobes_deps.h>
/* Hash-table geometry: 2^10 = 1024 buckets, shared by all tables below. */
40 UPROBE_HASH_BITS = 10,
41 UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
/* Probes hashed by the address of their out-of-line instruction slot. */
44 struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
/* Main table: probes hashed by probed (breakpoint) address. */
45 struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
47 DEFINE_SPINLOCK(uretprobe_lock); /* Protects uretprobe_inst_table */
/* Active uretprobe instances, hashed by the task's mm pointer. */
48 static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
/*
 * Debug-only dumpers for the kprobe/kretprobe/uprobe hash tables.
 * Compiled out by default (DEBUG_PRINT_HASH_TABLE == 0); flip to 1
 * to print every registered probe (tgid + address) via printk.
 * NOTE(review): this listing is missing lines inside these bodies —
 * confirm against the full file before editing.
 */
50 #define DEBUG_PRINT_HASH_TABLE 0
52 #if DEBUG_PRINT_HASH_TABLE
53 void print_kprobe_hash_table(void)
56 struct hlist_head *head;
58 DECLARE_NODE_PTR_FOR_HLIST(node);
/* Walk every bucket of the (kernel) kprobe table. */
61 for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
62 head = &kprobe_table[i];
63 swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
64 printk("####### find K tgid=%u, addr=%x\n",
70 void print_kretprobe_hash_table(void)
73 struct hlist_head *head;
75 DECLARE_NODE_PTR_FOR_HLIST(node);
/* Same walk for pending kretprobe instances. */
78 for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
79 head = &kretprobe_inst_table[i];
80 swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
81 printk("####### find KR tgid=%u, addr=%x\n",
87 void print_uprobe_hash_table(void)
90 struct hlist_head *head;
92 DECLARE_NODE_PTR_FOR_HLIST(node);
/* And for user-space probes, keyed by instruction-slot address. */
95 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
96 head = &uprobe_insn_slot_table[i];
97 swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
98 printk("####### find U tgid=%u, addr=%x\n",
106 * Keep all fields in the uprobe consistent
/*
 * Copy the architecture-dependent state (saved original opcode, decoded
 * instruction info, ARM/Thumb safety flags) from old_p into p, so a probe
 * joining an aggregate sees the same single-stepping data.
 */
108 static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
110 memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
111 memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
/* ARM-specific flags describing whether the insn is safe to arm. */
113 p->safe_arm = old_p->safe_arm;
114 p->safe_thumb = old_p->safe_thumb;
119 * Aggregate handlers for multiple uprobes support - these handlers
120 * take care of invoking the individual uprobe handlers on p->list
/*
 * Pre-handler for an aggregate probe: fan out to each member probe's
 * pre_handler under RCU. (Return-value handling lies on lines missing
 * from this listing — verify against the full file.)
 */
122 static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
127 list_for_each_entry_rcu(kp, &p->list, list) {
128 if (kp->pre_handler) {
129 ret = kp->pre_handler(kp, regs);
/* Post-handler for an aggregate probe: invoke every member's post_handler. */
139 static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
143 list_for_each_entry_rcu(kp, &p->list, list) {
144 if (kp->post_handler) {
145 kp->post_handler(kp, regs, flags);
/* Aggregate fault/break stubs; bodies are on lines missing from this listing. */
150 static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
155 static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
161 * Add the new probe to old_p->list. Fail if this is the
162 * second ujprobe at the address - two ujprobes can't coexist
/*
 * Link p into the aggregate old_p. A probe with a break_handler (a
 * ujprobe) goes at the tail and is rejected if one is already present;
 * ordinary probes are added at the head. Also promote the aggregate's
 * post_handler the first time a member needs one.
 */
164 static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
166 if (p->break_handler) {
/* Only one ujprobe per address: second break_handler is an error. */
167 if (old_p->break_handler) {
171 list_add_tail_rcu(&p->list, &old_p->list);
172 old_p->break_handler = aggr_break_uhandler;
174 list_add_rcu (&p->list, &old_p->list);
177 if (p->post_handler && !old_p->post_handler) {
178 old_p->post_handler = aggr_post_uhandler;
185 * Fill in the required fields of the "manager uprobe". Replace the
186 * earlier uprobe in the hlist with the manager uprobe
/*
 * Turn ap into the aggregate ("manager") probe for the address of p:
 * install fan-out handlers, start ap's member list with p, and swap
 * ap in for p in the uprobe hash list (RCU-safe replacement).
 */
188 static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
193 ap->pre_handler = aggr_pre_uhandler;
194 ap->fault_handler = aggr_fault_uhandler;
/* Only install the optional handlers the first member actually has. */
196 if (p->post_handler) {
197 ap->post_handler = aggr_post_uhandler;
200 if (p->break_handler) {
201 ap->break_handler = aggr_break_uhandler;
204 INIT_LIST_HEAD(&ap->list);
205 list_add_rcu(&p->list, &ap->list);
/* ap takes p's place in the hash bucket. */
207 hlist_replace_rcu(&p->hlist, &ap->hlist);
211 * This is the second or subsequent uprobe at the address - handle
/*
 * Register p at an address that already has old_p. If old_p is already
 * an aggregate, just append p; otherwise allocate a fresh manager uprobe,
 * promote old_p into it, then append p.
 */
214 static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
219 if (old_p->pre_handler == aggr_pre_uhandler) {
/* old_p is already a manager probe: sync arch state and link p in. */
220 copy_uprobe(old_p, p);
221 ret = add_new_uprobe(old_p, p);
/* First collision at this address: build a new aggregate uprobe. */
223 struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
228 uap->task = kp2up(p)->task;
230 add_aggr_uprobe(ap, old_p);
232 ret = add_new_uprobe(ap, p);
/*
 * Arm a user-space probe: write the breakpoint instruction into the
 * target task's address space at the probed address. Panics on write
 * failure — an unarmed half-registered probe would corrupt state.
 */
238 static void arm_uprobe(struct uprobe *p)
240 kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
241 int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
242 &insn, sizeof(insn));
244 panic("arm_uprobe: failed to write memory "
245 "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
/*
 * Disarm a user-space probe: restore the saved original opcode at the
 * probed address in @task's address space. Mirror of arm_uprobe();
 * also panics if the target memory cannot be written.
 */
249 void disarm_uprobe(struct kprobe *p, struct task_struct *task)
251 int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
252 &p->opcode, sizeof(p->opcode));
254 panic("disarm_uprobe: failed to write memory "
255 "tgid=%u, addr=%p!\n", task->tgid, p->addr);
258 EXPORT_SYMBOL_GPL(disarm_uprobe);
/* Initialize every bucket of the instruction-slot hash table. */
260 static void init_uprobes_insn_slots(void)
263 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
264 INIT_HLIST_HEAD(&uprobe_insn_slot_table[i]);
/* Initialize every bucket of the main uprobe hash table. */
268 static void init_uprobe_table(void)
271 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
272 INIT_HLIST_HEAD(&uprobe_table[i]);
/* Initialize every bucket of the uretprobe-instance hash table. */
276 static void init_uretprobe_inst_table(void)
279 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
280 INIT_HLIST_HEAD (&uretprobe_inst_table[i]);
/*
 * Look up the registered probe at user address @addr for process @tgid.
 * RCU walk of the uprobe_table bucket; match requires both the address
 * and the owning task's tgid (same address may be probed in many tasks).
 */
284 struct kprobe *get_ukprobe(void *addr, pid_t tgid)
286 struct hlist_head *head;
288 DECLARE_NODE_PTR_FOR_HLIST(node);
290 head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
291 swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
292 if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
/*
 * Publish a probe in the instruction-slot table, hashed by the address
 * of its out-of-line copied instruction (p->ainsn.insn), so the trap
 * path can map slot address -> probe.
 */
300 void add_uprobe_table(struct kprobe *p)
302 INIT_HLIST_NODE(&p->is_hlist);
303 hlist_add_head_rcu(&p->is_hlist, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
/*
 * Reverse lookup: find the probe whose instruction slot is at @addr for
 * process @tgid. Used when a fault/trap occurs inside the out-of-line
 * single-step slot rather than at the original probed address.
 */
306 struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
308 struct hlist_head *head;
310 DECLARE_NODE_PTR_FOR_HLIST(node);
312 /* TODO: test - two processes invokes instrumented function */
313 head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
314 swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
315 if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
/* Release the probe's instruction slot back to its slot manager. */
324 static void remove_uprobe(struct uprobe *up)
326 struct kprobe *p = up2kp(up);
328 swap_slot_free(up->sm, p->ainsn.insn);
/* Bucket selector for uretprobe instances; callers key by task->mm. */
331 static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
333 return &uretprobe_inst_table[hash_ptr (hash_key, UPROBE_HASH_BITS)];
336 /* Called with uretprobe_lock held */
/*
 * Move a uretprobe instance from the free list to "in flight": hash it
 * by the task's mm (for lookup at function return) and put it on the
 * uretprobe's used_instances list.
 */
337 static void add_urp_inst(struct uretprobe_instance *ri)
340 * Remove rp inst off the free list -
341 * Add it back when probed function returns
343 hlist_del(&ri->uflist);
345 /* Add rp inst onto table */
346 INIT_HLIST_NODE(&ri->hlist);
347 hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));
349 /* Also add this rp inst to the used list. */
350 INIT_HLIST_NODE(&ri->uflist);
351 hlist_add_head(&ri->uflist, &ri->rp->used_instances);
354 /* Called with uretprobe_lock held */
/* Inverse of add_urp_inst(): unhash the instance and return it to the free list. */
355 static void recycle_urp_inst(struct uretprobe_instance *ri)
358 hlist_del(&ri->hlist);
359 /* remove rp inst off the used list */
360 hlist_del(&ri->uflist);
361 /* put rp inst back onto the free list */
362 INIT_HLIST_NODE(&ri->uflist);
363 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
367 /* Called with uretprobe_lock held */
/* Return one in-flight instance of @rp, or NULL (return path is on lines
 * missing from this listing). */
368 static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
370 struct uretprobe_instance *ri;
371 DECLARE_NODE_PTR_FOR_HLIST(node);
373 swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
380 /* Called with uretprobe_lock held */
/* Take a free instance of @rp without allocating; NULL when the pool is
 * empty (unlike get_free_urp_inst(), which grows the pool). */
381 struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
383 struct uretprobe_instance *ri;
384 DECLARE_NODE_PTR_FOR_HLIST(node);
386 swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
393 /* Called with uretprobe_lock held */
/* Drain and free every instance on @rp's free list. */
394 static void free_urp_inst(struct uretprobe *rp)
396 struct uretprobe_instance *ri;
397 while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
398 hlist_del(&ri->uflist);
403 #define COMMON_URP_NR 10
405 static int alloc_nodes_uretprobe(struct uretprobe *rp)
408 struct uretprobe_instance *inst;
411 #if 1//def CONFIG_PREEMPT
412 rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
414 rp->maxacpptive += NR_CPUS;
416 alloc_nodes = COMMON_URP_NR;
418 for (i = 0; i < alloc_nodes; ++i) {
419 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
424 INIT_HLIST_NODE(&inst->uflist);
425 hlist_add_head(&inst->uflist, &rp->free_instances);
431 /* Called with uretprobe_lock held */
/*
 * Take a free instance of @rp; if the free list is empty, grow the pool
 * via alloc_nodes_uretprobe() (0 == success) and retry the scan once.
 */
432 static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
434 struct uretprobe_instance *ri;
435 DECLARE_NODE_PTR_FOR_HLIST(node);
437 swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
/* Pool exhausted: allocate more nodes, then look again. */
441 if (!alloc_nodes_uretprobe(rp)) {
442 swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
449 // ===================================================================
/*
 * Register a user-space probe:
 *   - normalize the address (clear ARM Thumb bit),
 *   - if another probe already exists at (addr, tgid), fold this one
 *     into an aggregate via register_aggr_uprobe() and return,
 *   - otherwise prepare the arch-specific instruction slot and insert
 *     the probe into uprobe_table.
 * Returns 0 on success or a negative error code.
 */
451 int dbi_register_uprobe(struct uprobe *up)
454 struct kprobe *p, *old_p;
461 DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
463 // thumb address = address-1;
464 #if defined(CONFIG_ARM)
465 // TODO: must be corrected in 'bundle'
/* Thumb functions have bit 0 set in the address; mask it off. */
466 if ((unsigned long) p->addr & 0x01) {
467 p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
471 p->ainsn.insn = NULL;
472 p->mod_refcounted = 0;
474 INIT_LIST_HEAD(&p->list);
475 #ifdef KPROBES_PROFILE
476 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
477 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
481 // get the first item
/* Existing probe at this (addr, tgid)? Then aggregate instead of insert. */
482 old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
485 p->safe_arm = old_p->safe_arm;
486 p->safe_thumb = old_p->safe_thumb;
488 ret = register_aggr_uprobe(old_p, p);
489 DBPRINTF("goto out\n", ret);
/* Fresh address: allocate the slot / copy the original instruction. */
493 ret = arch_prepare_uprobe(up);
495 DBPRINTF("goto out\n", ret);
499 DBPRINTF ("before out ret = 0x%x\n", ret);
501 // TODO: add uprobe (must be in function)
502 INIT_HLIST_NODE(&p->hlist);
503 hlist_add_head_rcu(&p->hlist, &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
507 DBPRINTF("out ret = 0x%x\n", ret);
/*
 * Unregister a user-space probe. @disarm selects whether the original
 * opcode is restored in the target task. Handles three cases: the probe
 * is alone in the hash list, it is the last member of an aggregate, or
 * it is one of several members (then only detach it and demote the
 * aggregate's break/post handlers if no member still needs them).
 * NOTE(review): several lines of this body are missing from the
 * listing — verify control flow against the full file.
 */
511 void __dbi_unregister_uprobe(struct uprobe *up, int disarm)
513 struct kprobe *p, *old_p, *list_p;
517 old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
518 if (unlikely(!old_p)) {
/* Confirm p really is registered under old_p before touching lists. */
523 list_for_each_entry_rcu(list_p, &old_p->list, list) {
525 /* uprobe p is a valid probe */
/* p is the only probe at this address (directly or as sole aggregate member). */
534 if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
535 (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
536 /* Only probe on the hash list */
538 disarm_uprobe(&up->kp, up->task);
540 hlist_del_rcu(&old_p->hlist);
543 list_del_rcu(&p->list);
/* Aggregate keeps other members: just unlink p ... */
549 list_del_rcu(&p->list);
/* ... and drop aggregate handlers that no remaining member requires. */
559 if (p->break_handler) {
560 old_p->break_handler = NULL;
563 if (p->post_handler) {
564 list_for_each_entry_rcu (list_p, &old_p->list, list) {
565 if (list_p->post_handler) {
571 if (cleanup_p == 0) {
572 old_p->post_handler = NULL;
577 EXPORT_SYMBOL_GPL(__dbi_unregister_uprobe);
/* Convenience wrapper: unregister and disarm (restore original opcode). */
579 void dbi_unregister_uprobe(struct uprobe *up)
581 __dbi_unregister_uprobe(up, 1);
/*
 * Register a user-space jprobe: install the setjmp/longjmp handler pair
 * on the embedded uprobe, then register it like any other uprobe.
 */
584 int dbi_register_ujprobe(struct ujprobe *jp)
588 /* Todo: Verify probepoint is a function entry point */
589 jp->up.kp.pre_handler = setjmp_upre_handler;
590 jp->up.kp.break_handler = longjmp_break_uhandler;
592 ret = dbi_register_uprobe(&jp->up);
/*
 * Unregister a user-space jprobe; @disarm is forwarded to the uprobe
 * layer. Afterwards, remove the probe from the instruction-slot table —
 * but only if it was actually hashed (see comment below).
 */
597 void __dbi_unregister_ujprobe(struct ujprobe *jp, int disarm)
599 __dbi_unregister_uprobe(&jp->up, disarm);
601 * Here is an attempt to unregister even those probes that have not been
602 * installed (hence not added to the hlist).
603 * So if we try to delete them from the hlist we will get NULL pointer
604 * dereference error. That is why we check whether this node
605 * really belongs to the hlist.
607 if (!(hlist_unhashed(&jp->up.kp.is_hlist))) {
608 hlist_del_rcu(&jp->up.kp.is_hlist);
611 EXPORT_SYMBOL_GPL(__dbi_unregister_ujprobe);
/* Convenience wrapper: unregister the ujprobe and disarm it. */
613 void dbi_unregister_ujprobe(struct ujprobe *jp)
615 __dbi_unregister_ujprobe(jp, 1);
/*
 * Probe handler hit when a uretprobed function returns through the
 * trampoline. Under uretprobe_lock, walk the current mm's instance
 * bucket, fire each matching instance's handler, recycle it, and stop
 * once the real (non-trampoline) return address is found; finally
 * restore the original return address into the registers.
 */
618 int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
620 struct uretprobe_instance *ri = NULL;
622 struct hlist_head *head;
623 unsigned long flags, tramp_addr, orig_ret_addr = 0;
624 struct hlist_node *tmp;
625 DECLARE_NODE_PTR_FOR_HLIST(node);
627 tramp_addr = arch_get_trampoline_addr(p, regs);
628 spin_lock_irqsave(&uretprobe_lock, flags);
630 head = uretprobe_inst_table_head(current->mm);
633 * It is possible to have multiple instances associated with a given
634 * task either because an multiple functions in the call path
635 * have a return probe installed on them, and/or more then one
636 * return probe was registered for a target function.
638 * We can handle this because:
639 * - instances are always inserted at the head of the list
640 * - when multiple return probes are registered for the same
641 * function, the first instance's ret_addr will point to the
642 * real return address, and all the rest will point to
643 * uretprobe_trampoline
645 swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
646 if (ri->task != current) {
647 /* another task is sharing our hash bucket */
653 kp = up2kp(&ri->rp->up);
/* Invoke the user's return handler for this instance. */
656 ri->rp->handler(ri, regs);
659 orig_ret_addr = (unsigned long)ri->ret_addr;
660 recycle_urp_inst(ri);
662 if ((orig_ret_addr != tramp_addr && kp == p) || kp == NULL) {
664 * This is the real return address. Any other
665 * instances associated with this task are for
666 * other calls deeper on the call stack
672 spin_unlock_irqrestore(&uretprobe_lock, flags);
/* Redirect execution back to the saved real return address. */
673 arch_set_orig_ret_addr(orig_ret_addr, regs);
/*
 * Entry-side handler for a uretprobe: on function entry, grab a free
 * instance, run the optional entry_handler, and let the arch layer swap
 * the return address for the trampoline. All under uretprobe_lock.
 */
678 static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
680 struct uprobe *up = container_of(p, struct uprobe, kp);
681 struct uretprobe *rp = container_of(up, struct uretprobe, up);
/* ARM: some entry encodings never return; chosen per current mode. */
683 int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
685 struct uretprobe_instance *ri;
693 /* TODO: consider to only swap the RA after the last pre_handler fired */
694 spin_lock_irqsave(&uretprobe_lock, flags);
696 /* TODO: test - remove retprobe after func entry but before its exit */
697 if ((ri = get_free_urp_inst(rp)) != NULL) {
701 if (rp->entry_handler)
702 rp->entry_handler(ri, regs);
/* Hijack the return address so the return lands in the trampoline. */
704 arch_prepare_uretprobe(ri, regs);
711 spin_unlock_irqrestore(&uretprobe_lock, flags);
/*
 * Register a user-space return probe: wire the entry-side pre-handler,
 * pre-allocate rp->maxactive instances onto the free list, register the
 * underlying uprobe at the function entry, and run the arch opcode
 * analysis. Returns 0 on success or a negative error code.
 */
716 int dbi_register_uretprobe(struct uretprobe *rp)
719 struct uretprobe_instance *inst;
721 DBPRINTF ("START\n");
723 rp->up.kp.pre_handler = pre_handler_uretprobe;
724 rp->up.kp.post_handler = NULL;
725 rp->up.kp.fault_handler = NULL;
726 rp->up.kp.break_handler = NULL;
728 /* Pre-allocate memory for max kretprobe instances */
729 if (rp->maxactive <= 0) {
730 #if 1//def CONFIG_PREEMPT
731 rp->maxactive = max(10, 2 * NR_CPUS);
733 rp->maxactive = NR_CPUS;
737 INIT_HLIST_HEAD(&rp->used_instances);
738 INIT_HLIST_HEAD(&rp->free_instances);
740 for (i = 0; i < rp->maxactive; i++) {
741 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
747 INIT_HLIST_NODE(&inst->uflist);
748 hlist_add_head(&inst->uflist, &rp->free_instances);
753 /* Establish function entry probe point */
754 ret = dbi_register_uprobe(&rp->up);
758 arch_opcode_analysis_uretprobe(rp);
/*
 * Disarm (restore the real return address of) every pending uretprobe
 * instance that belongs to @parent, applying the fix-up in @task's
 * context — used e.g. around fork/clone so the child does not return
 * into the trampoline.
 */
763 int dbi_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
766 struct uretprobe_instance *ri;
767 struct hlist_head *head;
768 struct hlist_node *tmp;
769 DECLARE_NODE_PTR_FOR_HLIST(node);
771 spin_lock_irqsave(&uretprobe_lock, flags);
773 head = uretprobe_inst_table_head(parent->mm);
774 swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
775 if (parent == ri->task) {
776 arch_disarm_urp_inst(ri, task);
780 spin_unlock_irqrestore(&uretprobe_lock, flags);
784 EXPORT_SYMBOL_GPL(dbi_disarm_urp_inst_for_task);
/*
 * Drop every pending uretprobe instance belonging to @task: log it,
 * disarm it (restore the real return address), and recycle the instance
 * back to its probe's free list. Typically used when the task exits.
 */
786 void dbi_discard_pending_uretprobes(struct task_struct *task)
789 struct uretprobe_instance *ri;
790 struct hlist_head *head;
791 struct hlist_node *tmp;
792 DECLARE_NODE_PTR_FOR_HLIST(node);
794 spin_lock_irqsave(&uretprobe_lock, flags);
796 head = uretprobe_inst_table_head(task->mm);
797 swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
798 if (ri->task == task) {
799 printk("%s (%d/%d): pending urp inst: %08lx\n",
800 task->comm, task->tgid, task->pid,
801 (unsigned long)ri->rp->up.kp.addr);
802 arch_disarm_urp_inst(ri, task);
803 recycle_urp_inst(ri);
807 spin_unlock_irqrestore(&uretprobe_lock, flags);
809 EXPORT_SYMBOL_GPL(dbi_discard_pending_uretprobes);
/*
 * Unregister a uretprobe: remove the entry probe, then under
 * uretprobe_lock disarm and recycle every still-pending instance
 * (logging any instance that cannot be disarmed), unhash the probe from
 * the slot table once no used instances remain, and free the instance
 * pool. @disarm is forwarded to the uprobe layer.
 */
811 void __dbi_unregister_uretprobe(struct uretprobe *rp, int disarm)
814 struct uretprobe_instance *ri;
816 __dbi_unregister_uprobe(&rp->up, disarm);
817 spin_lock_irqsave (&uretprobe_lock, flags);
819 while ((ri = get_used_urp_inst(rp)) != NULL) {
820 if (arch_disarm_urp_inst(ri, ri->task) != 0)
821 printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
822 ri->task->comm, ri->task->tgid, ri->task->pid,
823 (unsigned long)rp->up.kp.addr);
824 recycle_urp_inst(ri);
827 if (hlist_empty(&rp->used_instances)) {
828 struct kprobe *p = &rp->up.kp;
/* Only unhash if the probe was actually added to the slot table. */
830 if (!(hlist_unhashed(&p->is_hlist))) {
831 hlist_del_rcu(&p->is_hlist);
/* Free whatever instances remain attached to this probe. */
835 while ((ri = get_used_urp_inst(rp)) != NULL) {
837 hlist_del(&ri->uflist);
840 spin_unlock_irqrestore(&uretprobe_lock, flags);
843 EXPORT_SYMBOL_GPL(__dbi_unregister_uretprobe);
/* Convenience wrapper: unregister the uretprobe and disarm it. */
845 void dbi_unregister_uretprobe(struct uretprobe *rp)
847 __dbi_unregister_uretprobe(rp, 1);
/*
 * Remove every uprobe registered for @task's process (matched by tgid):
 * scan all buckets of uprobe_table with the deletion-safe iterator,
 * log and unregister each match.
 */
850 void dbi_unregister_all_uprobes(struct task_struct *task)
852 struct hlist_head *head;
855 struct hlist_node *tnode;
856 DECLARE_NODE_PTR_FOR_HLIST(node);
858 for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
859 head = &uprobe_table[i];
860 swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
861 if (kp2up(p)->task->tgid == task->tgid) {
862 struct uprobe *up = container_of(p, struct uprobe, kp);
863 printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",
864 p->addr, (unsigned long)p->opcode, task->comm, task->pid);
865 dbi_unregister_uprobe(up);
/* Thin wrapper exported for ujprobe handlers to return via the arch layer. */
871 void swap_ujprobe_return(void)
873 arch_ujprobe_return();
875 EXPORT_SYMBOL_GPL(swap_ujprobe_return);
/* Module init: set up the hash tables, then arch-specific uprobe init.
 * NOTE(review): init_uprobe_table() is not visible here — it may be on a
 * line missing from this listing; confirm it is called. */
877 static int __init init_uprobes(void)
880 init_uprobes_insn_slots();
881 init_uretprobe_inst_table();
883 return swap_arch_init_uprobes();
/* Module exit: tear down arch-specific uprobe state. */
886 static void __exit exit_uprobes(void)
888 swap_arch_exit_uprobes();
891 EXPORT_SYMBOL_GPL(dbi_register_ujprobe);
892 EXPORT_SYMBOL_GPL(dbi_unregister_ujprobe);
893 EXPORT_SYMBOL_GPL(dbi_register_uretprobe);
894 EXPORT_SYMBOL_GPL(dbi_unregister_uretprobe);
895 EXPORT_SYMBOL_GPL(dbi_unregister_all_uprobes);
897 module_init(init_uprobes);
898 module_exit(exit_uprobes);
900 MODULE_LICENSE ("GPL");