4 #include <linux/version.h>
5 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6 #include <linux/config.h>
11 #include <linux/hash.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/moduleloader.h>
16 #include <linux/kallsyms.h>
17 //#include <linux/freezer.h>
18 #include <linux/seq_file.h>
19 #ifdef CONFIG_DEBUG_FS
20 #include <linux/debugfs.h>
22 #include <asm-generic/sections.h>
23 #include <asm/cacheflush.h>
24 #include <asm/errno.h>
25 #include <linux/spinlock.h>
27 #include <linux/highmem.h> // kmap_atomic, kunmap_atomic, copy_from_user_page, copy_to_user_page
28 #include <linux/pagemap.h> // page_cache_release
29 #include <linux/vmalloc.h> // vmalloc, vfree
30 #if defined(CONFIG_X86)
31 #include <linux/kdebug.h> // register_die_notifier, unregister_die_notifier
33 #include <linux/hugetlb.h> // follow_hugetlb_page, is_vm_hugetlb_page
37 //#define arch_remove_kprobe(p) do { } while (0)
44 static DEFINE_SPINLOCK(die_notifier_lock);
46 int src_register_die_notifier(struct notifier_block *nb)
51 spin_lock_irqsave(&die_notifier_lock, flags);
52 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
53 err = atomic_notifier_chain_register(&panic_notifier_list, nb);
55 err = notifier_chain_register(&panic_notifier_list, nb);
57 spin_unlock_irqrestore(&die_notifier_lock, flags);
63 int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
64 unsigned long start, int len, int write, int force,
65 struct page **pages, struct vm_area_struct **vmas);
67 * hlist_replace_rcu - replace old entry by new one
68 * @old : the element to be replaced
69 * @new : the new element to insert
71 * The @old entry will be replaced with the @new entry atomically.
74 src_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new)
76 struct hlist_node *next = old->next;
79 new->pprev = old->pprev;
82 new->next->pprev = &new->next;
85 old->pprev = LIST_POISON2;
88 #define KPROBE_HASH_BITS 6
89 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
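/*
 * Bucket-selection sketch (illustrative, not additional API): the three hash
 * tables declared below are all indexed the same way, by folding the probe
 * (or instruction-slot) address into KPROBE_HASH_BITS bits with hash_ptr():
 *
 *	struct hlist_head *head;
 *
 *	head = &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)];
 *	hlist_add_head_rcu(&p->hlist, head);
 */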
93 * Some oddball architectures like 64-bit powerpc have function descriptors,
94 * so this must be overridable.
96 #ifndef kprobe_lookup_name
97 #define kprobe_lookup_name(name, addr) \
98 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
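/*
 * Usage sketch for the macro above (symbol name is illustrative; assumes
 * kallsyms_lookup_name() can resolve it, otherwise addr stays NULL):
 *
 *	struct kprobe kp = { .symbol_name = "do_exit" };
 *
 *	kprobe_lookup_name(kp.symbol_name, kp.addr);
 *	if (!kp.addr)
 *		return -EINVAL;
 */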
101 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
102 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
103 static struct hlist_head uprobe_insn_slot_table[KPROBE_TABLE_SIZE];
104 static atomic_t kprobe_count;
106 //DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
107 DEFINE_SPINLOCK (kretprobe_lock); /* Protects kretprobe_inst_table */
108 static DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL;
109 unsigned long handled_exceptions;
111 /* We have preemption disabled.. so it is safe to use __ versions */
113 set_kprobe_instance (struct kprobe *kp)
115 __get_cpu_var (kprobe_instance) = kp;
119 reset_kprobe_instance (void)
121 __get_cpu_var (kprobe_instance) = NULL;
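/*
 * The per-cpu kprobe_instance records which probe's handler is currently
 * running so that the fault/break handlers can find it. Intended pairing
 * (a sketch mirroring aggr_pre_handler() further below, not extra API):
 *
 *	set_kprobe_instance(kp);
 *	if (kp->pre_handler(kp, regs))
 *		return 1;	/* handler took over, skip the rest */
 *	reset_kprobe_instance();
 */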
125 * This routine is called either:
126 * - under the kprobe_mutex - during kprobe_[un]register()
128 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
130 struct kprobe __kprobes *
131 get_kprobe (void *addr, int tgid, struct task_struct *ctask)
133 struct hlist_head *head;
134 struct hlist_node *node;
135 struct kprobe *p, *retVal = NULL;
136 int ret = 0, uprobe_found;
137 struct page *page = NULL, *tpage = NULL;
138 struct vm_area_struct *vma = NULL;
139 struct task_struct *task = NULL;
143 if (ctask && ctask->active_mm)
145 ret = get_user_pages_uprobe (ctask, ctask->active_mm, (unsigned long) addr, 1, 0, 0, &tpage, NULL);
147 DBPRINTF ("get_user_pages for task %d at %p failed!", current->pid, addr);
150 paddr = page_address (tpage);
151 page_cache_release (tpage);
155 // DBPRINTF("task %d has no mm!", ctask->pid);
157 //TODO: test - two processes invoke an instrumented function
158 head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
159 hlist_for_each_entry_rcu (p, node, head, hlist)
161 // if looking for a kernel probe and this is a kernel probe with the same addr, OR
162 // if looking for a user space probe and this is a user space probe with the same addr and pid
163 DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->addr, tgid, p->tgid);
169 if (!tgid || uprobe_found)
173 DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
175 DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
179 else if (tgid != p->tgid)
181 // if looking for a user space probe and this is a user space probe
182 // with another addr and pid but with the same offset within the page,
183 // it could be the same probe (mapped at a different user space address);
184 // we should handle it as a usual probe but without notification to user space
185 if (paddr && tgid && (((unsigned long) addr & ~PAGE_MASK) == ((unsigned long) p->addr & ~PAGE_MASK))
188 DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d. possibly for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
189 // this probe has the same offset in the page
190 // look in the probes for the other pids
191 // get page for user space probe addr
193 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
194 task = find_task_by_pid (p->tgid);
195 #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) */
196 task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
199 get_task_struct (task);
203 DBPRINTF ("task for pid %d not found! Dead probe?", p->tgid);
208 if (page_present (task->active_mm, (unsigned long) p->addr))
210 ret = get_user_pages_uprobe (task, task->active_mm, (unsigned long) p->addr, 1, 0, 0, &page, &vma);
212 DBPRINTF ("get_user_pages for task %d at %p failed!", p->tgid, p->addr);
219 DBPRINTF ("task %d has no mm!", task->pid);
222 put_task_struct (task);
225 if (paddr == page_address (page))
227 retVal = p; // we found the probe in other process address space
228 DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
229 panic ("user space probe from another process");
231 page_cache_release (page);
238 DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
242 struct kprobe __kprobes *
243 get_kprobe_by_insn_slot (void *addr, int tgid, struct task_struct *ctask)
245 struct hlist_head *head;
246 struct hlist_node *node;
247 struct kprobe *p, *retVal = NULL;
250 //TODO: test - two processes invoke an instrumented function
251 head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
252 hlist_for_each_entry_rcu (p, node, head, is_hlist)
254 // if looking for a kernel probe and this is a kernel probe with the same addr, OR
255 // if looking for a user space probe and this is a user space probe with the same addr and pid
256 DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->ainsn.insn, tgid, p->tgid);
257 if (p->ainsn.insn == addr)
262 if (!tgid || uprobe_found)
266 DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
268 DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
274 DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
279 * Aggregate handlers for multiple kprobes support - these handlers
280 * take care of invoking the individual kprobe handlers on p->list
283 aggr_pre_handler (struct kprobe *p, struct pt_regs *regs /*,
284 struct vm_area_struct **vma,
285 struct page **page, unsigned long **kaddr */ )
290 list_for_each_entry_rcu (kp, &p->list, list)
294 set_kprobe_instance (kp);
295 ret = kp->pre_handler (kp, regs);
299 reset_kprobe_instance ();
304 static void __kprobes
305 aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags)
309 list_for_each_entry_rcu (kp, &p->list, list)
311 if (kp->post_handler)
313 set_kprobe_instance (kp);
314 kp->post_handler (kp, regs, flags);
315 reset_kprobe_instance ();
323 aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr)
325 struct kprobe *cur = __get_cpu_var (kprobe_instance);
328 * if we faulted "during" the execution of a user specified
329 * probe handler, invoke just that probe's fault handler
331 if (cur && cur->fault_handler)
333 if (cur->fault_handler (cur, regs, trapnr))
341 aggr_break_handler (struct kprobe *p, struct pt_regs *regs /*,
342 struct vm_area_struct **vma,
343 struct page **page, unsigned long **kaddr */ )
345 struct kprobe *cur = __get_cpu_var (kprobe_instance);
347 DBPRINTF ("cur = 0x%p\n", cur);
349 DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);
351 if (cur && cur->break_handler)
353 if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
356 reset_kprobe_instance ();
360 /* Walks the list and increments nmissed count for multiprobe case */
362 kprobes_inc_nmissed_count (struct kprobe *p)
365 if (p->pre_handler != aggr_pre_handler)
371 list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++;
376 /* Called with kretprobe_lock held */
377 struct kretprobe_instance __kprobes *
378 get_free_rp_inst (struct kretprobe *rp)
380 struct hlist_node *node;
381 struct kretprobe_instance *ri;
382 hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
387 /* Called with kretprobe_lock held */
388 static struct kretprobe_instance __kprobes *
389 get_used_rp_inst (struct kretprobe *rp)
391 struct hlist_node *node;
392 struct kretprobe_instance *ri;
393 hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
397 /* Called with kretprobe_lock held */
399 add_rp_inst (struct kretprobe_instance *ri)
402 * Remove rp inst off the free list -
403 * Add it back when probed function returns
405 hlist_del (&ri->uflist);
407 /* Add rp inst onto table */
408 INIT_HLIST_NODE (&ri->hlist);
409 hlist_add_head (&ri->hlist, &kretprobe_inst_table[hash_ptr (ri->task, KPROBE_HASH_BITS)]);
411 /* Also add this rp inst to the used list. */
412 INIT_HLIST_NODE (&ri->uflist);
413 hlist_add_head (&ri->uflist, &ri->rp->used_instances);
416 /* Called with kretprobe_lock held */
418 recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
420 /* remove rp inst off the kretprobe_inst_table */
421 hlist_del (&ri->hlist);
424 /* remove rp inst off the used list */
425 hlist_del (&ri->uflist);
426 /* put rp inst back onto the free list */
427 INIT_HLIST_NODE (&ri->uflist);
428 hlist_add_head (&ri->uflist, &ri->rp->free_instances);
432 hlist_add_head (&ri->hlist, head);
435 struct hlist_head __kprobes *
436 kretprobe_inst_table_head (struct task_struct *tsk)
438 return &kretprobe_inst_table[hash_ptr (tsk, KPROBE_HASH_BITS)];
442 * This function is called from finish_task_switch when task tk becomes dead,
443 * so that we can recycle any function-return probe instances associated
444 * with this task. These left over instances represent probed functions
445 * that have been called but will never return.
447 /*void __kprobes kprobe_flush_task(struct task_struct *tk)
449 struct kretprobe_instance *ri;
450 struct hlist_head *head, empty_rp;
451 struct hlist_node *node, *tmp;
452 unsigned long flags = 0;
454 INIT_HLIST_HEAD(&empty_rp);
455 spin_lock_irqsave(&kretprobe_lock, flags);
456 head = kretprobe_inst_table_head(tk);
457 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
459 recycle_rp_inst(ri, &empty_rp);
461 spin_unlock_irqrestore(&kretprobe_lock, flags);
463 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
464 hlist_del(&ri->hlist);
470 free_rp_inst (struct kretprobe *rp)
472 struct kretprobe_instance *ri;
473 while ((ri = get_free_rp_inst (rp)) != NULL)
475 hlist_del (&ri->uflist);
481 * Keep all fields in the kprobe consistent
484 copy_kprobe (struct kprobe *old_p, struct kprobe *p)
486 memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t));
487 memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn));
488 p->tgid = old_p->tgid;
489 p->ss_addr = old_p->ss_addr;
490 //p->spid = old_p->spid;
494 * Add the new probe to old_p->list. Fail if this is the
495 * second jprobe at the address - two jprobes can't coexist
498 add_new_kprobe (struct kprobe *old_p, struct kprobe *p)
500 if (p->break_handler)
502 if (old_p->break_handler)
504 list_add_tail_rcu (&p->list, &old_p->list);
505 old_p->break_handler = aggr_break_handler;
508 list_add_rcu (&p->list, &old_p->list);
509 if (p->post_handler && !old_p->post_handler)
510 old_p->post_handler = aggr_post_handler;
515 * Fill in the required fields of the "manager kprobe". Replace the
516 * earlier kprobe in the hlist with the manager kprobe
519 add_aggr_kprobe (struct kprobe *ap, struct kprobe *p)
522 flush_insn_slot (ap);
524 ap->pre_handler = aggr_pre_handler;
525 ap->fault_handler = aggr_fault_handler;
527 ap->post_handler = aggr_post_handler;
528 if (p->break_handler)
529 ap->break_handler = aggr_break_handler;
531 INIT_LIST_HEAD (&ap->list);
532 list_add_rcu (&p->list, &ap->list);
534 src_hlist_replace_rcu (&p->hlist, &ap->hlist);
538 * This is the second or subsequent kprobe at the address - handle
542 register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p)
546 DBPRINTF ("start\n");
548 DBPRINTF ("p = %p old_p = %p \n", p, old_p);
549 if (old_p->pre_handler == aggr_pre_handler)
551 DBPRINTF ("aggr_pre_handler \n");
553 copy_kprobe (old_p, p);
554 ret = add_new_kprobe (old_p, p);
558 DBPRINTF ("kzalloc\n");
561 ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL);
563 ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL);
565 memset (ap, 0, sizeof (struct kprobe));
569 add_aggr_kprobe (ap, old_p);
571 DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
572 ret = add_new_kprobe (ap, p);
578 __register_kprobe (struct kprobe *p, unsigned long called_from, int atomic)
580 struct kprobe *old_p;
581 // struct module *probed_mod;
584 * If we have a symbol_name argument look it up,
585 * and add it to the address. That way the addr
586 * field can either be global or relative to a symbol.
592 kprobe_lookup_name (p->symbol_name, p->addr);
597 DBPRINTF ("p->addr = 0x%p\n", p->addr);
598 p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset);
599 DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
601 /* if ((!kernel_text_address((unsigned long) p->addr)) ||
602 in_kprobes_functions((unsigned long) p->addr))
605 #ifdef KPROBES_PROFILE
606 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
607 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
610 p->mod_refcounted = 0;
617 // Check whether we are probing a module
618 if ((probed_mod = module_text_address((unsigned long) p->addr))) {
619 struct module *calling_mod = module_text_address(called_from);
620 // We must allow modules to probe themselves and,
621 // in that case, avoid incrementing the module refcount,
622 // so that self-probing modules can be unloaded.
624 if (calling_mod && (calling_mod != probed_mod)) {
625 if (unlikely(!try_module_get(probed_mod)))
627 p->mod_refcounted = 1;
633 // mutex_lock(&kprobe_mutex);
634 old_p = get_kprobe (p->addr, 0, NULL);
637 ret = register_aggr_kprobe (old_p, p);
639 atomic_inc (&kprobe_count);
643 if ((ret = arch_prepare_kprobe (p)) != 0)
646 DBPRINTF ("before out ret = 0x%x\n", ret);
648 INIT_HLIST_NODE (&p->hlist);
649 hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
651 /* if (atomic_add_return(1, &kprobe_count) == \
652 (ARCH_INACTIVE_KPROBE_COUNT + 1))
653 register_page_fault_notifier(&kprobe_page_fault_nb);*/
658 // mutex_unlock(&kprobe_mutex);
660 if (ret && probed_mod)
661 module_put(probed_mod);
663 DBPRINTF ("out ret = 0x%x\n", ret);
669 __register_uprobe (struct kprobe *p, struct task_struct *task, int atomic, unsigned long called_from)
672 struct kprobe *old_p;
677 DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
679 p->mod_refcounted = 0;
681 #ifdef KPROBES_PROFILE
682 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
683 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
687 // get the first item
688 old_p = get_kprobe (p->addr, p->tgid, NULL);
691 ret = register_aggr_kprobe (old_p, p);
693 atomic_inc (&kprobe_count);
696 if ((ret = arch_prepare_uprobe (p, task, atomic)) != 0)
701 DBPRINTF ("before out ret = 0x%x\n", ret);
703 INIT_HLIST_NODE (&p->hlist);
704 hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
706 INIT_HLIST_NODE (&p->is_hlist);
707 hlist_add_head_rcu (&p->is_hlist, &uprobe_insn_slot_table[hash_ptr (p->ainsn.insn, KPROBE_HASH_BITS)]);
709 arch_arm_uprobe (p, task);
711 DBPRINTF ("out ret = 0x%x\n", ret);
717 unregister_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
719 unregister_kprobe (p, task, atomic);
723 register_kprobe (struct kprobe *p, int atomic)
725 return __register_kprobe (p, (unsigned long) __builtin_return_address (0), atomic);
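/*
 * Usage sketch for the kernel-probe API exported from this file. Unlike
 * stock kprobes, register_kprobe()/unregister_kprobe() here take an extra
 * "atomic" argument, and unregister additionally takes a task pointer
 * (NULL for kernel probes). Handler and symbol names are illustrative:
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name = "do_exit",
 *		.pre_handler = my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp, 0);
 *	...
 *	unregister_kprobe(&my_kp, NULL, 0);
 */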
729 unregister_kprobe (struct kprobe *p, struct task_struct *task, int atomic)
731 // struct module *mod;
732 struct kprobe *old_p, *list_p;
733 int cleanup_p, pid = 0;
735 // mutex_lock(&kprobe_mutex);
739 old_p = get_kprobe (p->addr, pid, NULL);
740 DBPRINTF ("unregister_kprobe p=%p old_p=%p", p, old_p);
741 if (unlikely (!old_p))
743 // mutex_unlock(&kprobe_mutex);
748 list_for_each_entry_rcu (list_p, &old_p->list, list)
750 /* kprobe p is a valid probe */
752 // mutex_unlock(&kprobe_mutex);
756 DBPRINTF ("unregister_kprobe valid_p");
757 if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
758 (p->list.next == &old_p->list) && (p->list.prev == &old_p->list)))
760 /* Only probe on the hash list */
761 DBPRINTF ("unregister_kprobe disarm pid=%d", pid);
763 arch_disarm_uprobe (p, task);//vma, page, kaddr);
765 arch_disarm_kprobe (p);
766 hlist_del_rcu (&old_p->hlist);
771 list_del_rcu (&p->list);
774 DBPRINTF ("unregister_kprobe cleanup_p=%d", cleanup_p);
775 // mutex_unlock(&kprobe_mutex);
777 // synchronize_sched();
779 if (p->mod_refcounted &&
780 (mod = module_text_address((unsigned long)p->addr)))
787 list_del_rcu (&p->list);
790 arch_remove_kprobe (p, task);
794 /// mutex_lock(&kprobe_mutex);
795 if (p->break_handler)
796 old_p->break_handler = NULL;
799 list_for_each_entry_rcu (list_p, &old_p->list, list)
801 if (list_p->post_handler)
808 old_p->post_handler = NULL;
810 // mutex_unlock(&kprobe_mutex);
813 /* Call unregister_page_fault_notifier()
814 * if no probes are active
816 // mutex_lock(&kprobe_mutex);
817 /* if (atomic_add_return(-1, &kprobe_count) == \
818 ARCH_INACTIVE_KPROBE_COUNT)
819 unregister_page_fault_notifier(&kprobe_page_fault_nb);*/
820 // mutex_unlock(&kprobe_mutex);
825 register_ujprobe (struct task_struct *task, struct mm_struct *mm, struct jprobe *jp, int atomic)
831 /* Todo: Verify probepoint is a function entry point */
832 jp->kp.pre_handler = setjmp_pre_handler;
833 jp->kp.break_handler = longjmp_break_handler;
835 ret = __register_uprobe (&jp->kp, task, atomic,
836 (unsigned long) __builtin_return_address (0));
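/*
 * Hypothetical user-space jprobe setup (hedged sketch: the address, tgid
 * and entry handler come from the tracing frontend and are illustrative;
 * the entry field is assumed to follow the stock jprobe layout):
 *
 *	my_ujp.kp.addr = (kprobe_opcode_t *) user_func_addr;	// user VA in target
 *	my_ujp.kp.tgid = task->tgid;
 *	my_ujp.entry = (kprobe_opcode_t *) my_entry_handler;
 *	ret = register_ujprobe(task, task->mm, &my_ujp, 0);
 *	...
 *	unregister_ujprobe(task, &my_ujp, 0);
 */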
845 unregister_ujprobe (struct task_struct *task, struct jprobe *jp, int atomic)
847 unregister_uprobe (&jp->kp, task, atomic);
851 register_jprobe (struct jprobe *jp, int atomic)
853 /* Todo: Verify probepoint is a function entry point */
854 jp->kp.pre_handler = setjmp_pre_handler;
855 jp->kp.break_handler = longjmp_break_handler;
857 return __register_kprobe (&jp->kp, (unsigned long) __builtin_return_address (0), atomic);
861 unregister_jprobe (struct jprobe *jp, int atomic)
863 unregister_kprobe (&jp->kp, 0, atomic);
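/*
 * Kernel jprobe usage sketch (probed symbol and handler are illustrative;
 * as with stock jprobes the entry handler must mirror the probed function's
 * signature and finish with jprobe_return()):
 *
 *	static void my_do_exit_entry(long code)
 *	{
 *		jprobe_return();
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry = (kprobe_opcode_t *) my_do_exit_entry,
 *		.kp.symbol_name = "do_exit",
 *	};
 *
 *	ret = register_jprobe(&my_jp, 0);
 *	...
 *	unregister_jprobe(&my_jp, 0);
 */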
867 * This kprobe pre_handler is registered with every kretprobe. When probe
868 * hits it will set up the return probe.
871 pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs /*, struct vm_area_struct **vma,
872 struct page **page, unsigned long **kaddr */ )
874 struct kretprobe *rp = container_of (p, struct kretprobe, kp);
875 unsigned long flags = 0;
876 DBPRINTF ("START\n");
878 /* TODO: consider swapping the RA only after the last pre_handler has fired */
879 spin_lock_irqsave (&kretprobe_lock, flags);
881 __arch_prepare_kretprobe (rp, regs);
882 spin_unlock_irqrestore (&kretprobe_lock, flags);
887 struct kretprobe *sched_rp;
890 register_kretprobe (struct kretprobe *rp, int atomic)
893 struct kretprobe_instance *inst;
897 rp->kp.pre_handler = pre_handler_kretprobe;
898 rp->kp.post_handler = NULL;
899 rp->kp.fault_handler = NULL;
900 rp->kp.break_handler = NULL;
904 /* Pre-allocate memory for max kretprobe instances */
905 if(rp->kp.addr == sched_addr)
906 rp->maxactive = 1000;//max (100, 2 * NR_CPUS);
907 else if (rp->maxactive <= 0)
909 #if 1//def CONFIG_PREEMPT
910 rp->maxactive = max (10, 2 * NR_CPUS);
912 rp->maxactive = NR_CPUS;
915 INIT_HLIST_HEAD (&rp->used_instances);
916 INIT_HLIST_HEAD (&rp->free_instances);
917 for (i = 0; i < rp->maxactive; i++)
919 inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
925 INIT_HLIST_NODE (&inst->uflist);
926 hlist_add_head (&inst->uflist, &rp->free_instances);
929 DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
931 /* Establish function entry probe point */
932 if ((ret = __register_kprobe (&rp->kp, (unsigned long) __builtin_return_address (0), atomic)) != 0)
935 DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
936 if(rp->kp.addr == sched_addr)
943 unregister_kretprobe (struct kretprobe *rp, int atomic)
946 struct kretprobe_instance *ri;
948 //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
949 // *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
950 unregister_kprobe (&rp->kp, 0, atomic);
952 if(rp->kp.addr == sched_addr)
955 //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
956 // *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
958 spin_lock_irqsave (&kretprobe_lock, flags);
959 while ((ri = get_used_rp_inst (rp)) != NULL)
962 hlist_del (&ri->uflist);
964 spin_unlock_irqrestore (&kretprobe_lock, flags);
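/*
 * Kretprobe usage sketch (assumes the stock kretprobe handler field; symbol
 * and handler names are illustrative). maxactive bounds the number of
 * return instances pre-allocated by register_kretprobe() above:
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler = my_ret_handler,
 *		.maxactive = 20,
 *		.kp.symbol_name = "do_fork",
 *	};
 *
 *	ret = register_kretprobe(&my_rp, 0);
 *	...
 *	unregister_kretprobe(&my_rp, 0);
 */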
969 register_uretprobe (struct task_struct *task, struct mm_struct *mm, struct kretprobe *rp, int atomic)
972 struct kretprobe_instance *inst;
973 /*struct page *pages[2] = {0, 0};
974 struct vm_area_struct *vmas[2] = {0, 0};
975 unsigned long *kaddrs[2] = {0, 0}; */
981 DBPRINTF ("START\n");
983 rp->kp.pre_handler = pre_handler_kretprobe;
984 rp->kp.post_handler = NULL;
985 rp->kp.fault_handler = NULL;
986 rp->kp.break_handler = NULL;
990 /* Pre-allocate memory for max kretprobe instances */
991 if (rp->maxactive <= 0)
993 #if 1//def CONFIG_PREEMPT
994 rp->maxactive = max (10, 2 * NR_CPUS);
996 rp->maxactive = NR_CPUS;
999 INIT_HLIST_HEAD (&rp->used_instances);
1000 INIT_HLIST_HEAD (&rp->free_instances);
1001 for (i = 0; i < rp->maxactive; i++)
1003 inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
1010 INIT_HLIST_NODE (&inst->uflist);
1011 hlist_add_head (&inst->uflist, &rp->free_instances);
1016 ret = get_user_pages_uprobe (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1019 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
1024 kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1026 kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1027 // if 2nd instruction is on the 2nd page
1028 if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
1030 ret = get_user_pages_uprobe (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1033 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
1038 kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1040 kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1044 // 2nd instruction is on the 1st page too
1046 pages[1] = pages[0];
1047 kaddrs[1] = kaddrs[0] + 1;
1050 /* Establish function exit probe point */
1051 if ((ret = arch_prepare_uretprobe (rp, task/*vmas, pages, kaddrs */ )) != 0)
1053 /* Establish function entry probe point */
1054 if ((ret = __register_uprobe (&rp->kp, task, atomic,
1055 (unsigned long) __builtin_return_address (0))) != 0)
1061 arch_arm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
1064 set_page_dirty (pages[1]);
1066 set_page_dirty_lock (pages[1]);
1075 kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
1079 page_cache_release (pages[0]);
1081 if ((pages[0] != pages[1]))
1088 kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
1092 page_cache_release (pages[1]);
1095 /*else if( (pages[0] != pages[2]) ){
1098 if (atomic) kunmap_atomic(kaddrs[2], KM_USER1);
1099 else kunmap(pages[2]);
1101 page_cache_release(pages[2]);
1112 static struct kretprobe *__kprobes
1113 clone_kretprobe (struct kretprobe *rp)
1115 struct kprobe *old_p;
1116 struct kretprobe *clone = NULL;
1119 clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL);
1122 DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);
1125 memcpy (clone, rp, sizeof (struct kretprobe));
1126 clone->kp.pre_handler = pre_handler_kretprobe;
1127 clone->kp.post_handler = NULL;
1128 clone->kp.fault_handler = NULL;
1129 clone->kp.break_handler = NULL;
1130 old_p = get_kprobe (rp->kp.addr, rp->kp.tgid, NULL);
1133 ret = register_aggr_kprobe (old_p, &clone->kp);
1139 atomic_inc (&kprobe_count);
1146 unregister_uretprobe (struct task_struct *task, struct kretprobe *rp, int atomic)
1149 unsigned long flags;
1150 struct kretprobe_instance *ri;
1151 struct kretprobe *rp2 = NULL;
1152 /*struct mm_struct *mm;
1153 struct page *pages[2] = {0, 0};
1154 struct vm_area_struct *vmas[2] = {0, 0};
1155 unsigned long *kaddrs[2] = {0, 0}; */
1161 mm = atomic ? task->active_mm : get_task_mm (task);
1164 DBPRINTF ("task %u has no mm!", task->pid);
1170 down_read (&mm->mmap_sem);
1171 ret = get_user_pages_uprobe (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1175 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
1179 kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1181 kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1182 if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
1185 ret = get_user_pages_uprobe (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1188 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
1192 kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1194 kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1199 pages[1] = pages[0];
1200 kaddrs[1] = kaddrs[0] + 1;
1204 DBPRINTF ("unregister_uretprobe1 addr %p [%lx %lx]", rp->kp.addr, *kaddrs[0], *kaddrs[1]);
1206 spin_lock_irqsave (&kretprobe_lock, flags);
1207 if (hlist_empty (&rp->used_instances))
1209 // if there are no used retprobe instances (i.e. the function has not been entered), disarm the retprobe
1210 arch_disarm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
1213 set_page_dirty (pages[1]);
1215 set_page_dirty_lock (pages[1]);
1220 rp2 = clone_kretprobe (rp);
1222 DBPRINTF ("unregister_uretprobe addr %p: failed to clone retprobe!", rp->kp.addr);
1225 DBPRINTF ("initiating deferred retprobe deletion addr %p", rp->kp.addr);
1226 printk ("initiating deferred retprobe deletion addr %p\n", rp->kp.addr);
1231 while ((ri = get_used_rp_inst (rp)) != NULL)
1235 hlist_del (&ri->uflist);
1237 spin_unlock_irqrestore (&kretprobe_lock, flags);
1240 unregister_uprobe (&rp->kp, task, atomic);
1241 //DBPRINTF("unregister_uretprobe3 addr %p [%lx %lx]",
1242 // rp->kp.addr, *kaddrs[0], *kaddrs[1]);
1250 kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
1254 page_cache_release (pages[0]);
1256 if (pages[1] && (pages[0] != pages[1]))
1261 kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
1265 page_cache_release (pages[1]);
1269 up_read (&mm->mmap_sem);
1279 unregister_all_uprobes (struct task_struct *task, int atomic)
1281 struct hlist_head *head;
1282 struct hlist_node *node, *tnode;
1286 for(i = 0; i < KPROBE_TABLE_SIZE; i++){
1287 head = &kprobe_table[i];
1288 hlist_for_each_entry_safe (p, node, tnode, head, hlist){
1289 if(p->tgid == task->tgid){
1290 printk("unregister_all_uprobes: delete uprobe at %pf for %s/%d\n", p->addr, task->comm, task->pid);
1291 unregister_uprobe (p, task, atomic);
1295 purge_garbage_uslots(task, atomic);
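/*
 * unregister_all_uprobes() is the bulk-cleanup path: it walks every bucket
 * of kprobe_table and drops the probes whose tgid matches the given task.
 * A frontend tearing down tracing of a dying process would roughly do:
 *
 *	unregister_all_uprobes(task, 1);	/* atomic: e.g. called from the exit path */
 */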
1299 #define GUP_FLAGS_WRITE 0x1
1300 #define GUP_FLAGS_FORCE 0x2
1301 #define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
1302 #define GUP_FLAGS_IGNORE_SIGKILL 0x8
1304 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
1305 static inline int use_zero_page(struct vm_area_struct *vma)
1308 * We don't want to optimize FOLL_ANON for make_pages_present()
1309 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
1310 * we want to get the page from the page tables to make sure
1311 * that we serialize and update with any other user of that
1314 if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
1317 * And if we have a fault routine, it's not an anonymous region.
1319 return !vma->vm_ops || !vma->vm_ops->fault;
1323 int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1324 unsigned long start, int len, int flags,
1325 struct page **pages, struct vm_area_struct **vmas)
1328 unsigned int vm_flags = 0;
1329 int write = !!(flags & GUP_FLAGS_WRITE);
1330 int force = !!(flags & GUP_FLAGS_FORCE);
1331 int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
1332 int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
1337 * Require read or write permissions.
1338 * If 'force' is set, we only require the "MAY" flags.
1340 vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1341 vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1345 struct vm_area_struct *vma;
1346 unsigned int foll_flags;
1348 //vma = find_extend_vma(mm, start);
1349 vma = find_vma(mm, start);
1350 if (!vma && in_gate_area(tsk, start)) {
1351 unsigned long pg = start & PAGE_MASK;
1352 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
1358 /* user gate pages are read-only */
1359 if (!ignore && write)
1360 return i ? : -EFAULT;
1362 pgd = pgd_offset_k(pg);
1364 pgd = pgd_offset_gate(mm, pg);
1365 BUG_ON(pgd_none(*pgd));
1366 pud = pud_offset(pgd, pg);
1367 BUG_ON(pud_none(*pud));
1368 pmd = pmd_offset(pud, pg);
1370 return i ? : -EFAULT;
1371 pte = pte_offset_map(pmd, pg);
1372 if (pte_none(*pte)) {
1374 return i ? : -EFAULT;
1377 struct page *page = vm_normal_page(gate_vma, start, *pte);
1392 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1393 (!ignore && !(vm_flags & vma->vm_flags)))
1394 return i ? : -EFAULT;
1396 if (is_vm_hugetlb_page(vma)) {
1397 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1398 i = follow_hugetlb_page(mm, vma, pages, vmas,
1401 i = follow_hugetlb_page(mm, vma, pages, vmas,
1402 &start, &len, i, write);
1407 foll_flags = FOLL_TOUCH;
1409 foll_flags |= FOLL_GET;
1411 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
1412 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
1413 if (!write && use_zero_page(vma))
1414 foll_flags |= FOLL_ANON;
1421 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
1423 * If we have a pending SIGKILL, don't keep faulting
1424 * pages and potentially allocating memory, unless
1425 * current is handling munlock--e.g., on exit. In
1426 * that case, we are not allocating memory. Rather,
1427 * we're only unlocking already resident/mapped pages.
1429 if (unlikely(!ignore_sigkill &&
1430 fatal_signal_pending(current)))
1431 return i ? i : -ERESTARTSYS;
1435 foll_flags |= FOLL_WRITE;
1440 DBPRINTF ("pages = %p vma = %p\n", pages, vma);
1441 while (!(page = follow_page(vma, start, foll_flags))) {
1443 ret = handle_mm_fault(mm, vma, start,
1444 foll_flags & FOLL_WRITE);
1446 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1447 if (ret & VM_FAULT_WRITE)
1448 foll_flags &= ~FOLL_WRITE;
1450 switch (ret & ~VM_FAULT_WRITE) {
1451 case VM_FAULT_MINOR:
1454 case VM_FAULT_MAJOR:
1457 case VM_FAULT_SIGBUS:
1458 return i ? i : -EFAULT;
1460 return i ? i : -ENOMEM;
1466 if (ret & VM_FAULT_ERROR) {
1467 if (ret & VM_FAULT_OOM)
1468 return i ? i : -ENOMEM;
1469 else if (ret & VM_FAULT_SIGBUS)
1470 return i ? i : -EFAULT;
1473 if (ret & VM_FAULT_MAJOR)
1479 * The VM_FAULT_WRITE bit tells us that
1480 * do_wp_page has broken COW when necessary,
1481 * even if maybe_mkwrite decided not to set
1482 * pte_write. We can thus safely do subsequent
1483 * page lookups as if they were reads. But only
1484 * do so when looping for pte_write is futile:
1485 * in some cases userspace may also be wanting
1486 * to write to the gotten user page, which a
1487 * read fault here might prevent (a readonly
1488 * page might get reCOWed by userspace write).
1490 if ((ret & VM_FAULT_WRITE) &&
1491 !(vma->vm_flags & VM_WRITE))
1492 foll_flags &= ~FOLL_WRITE;
1500 return i ? i : PTR_ERR(page);
1504 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1505 flush_anon_page(page, start);
1507 flush_anon_page(vma, page, start);
1509 flush_dcache_page(page);
1516 } while (len && start < vma->vm_end);
1521 int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1522 unsigned long start, int len, int write, int force,
1523 struct page **pages, struct vm_area_struct **vmas)
1528 flags |= GUP_FLAGS_WRITE;
1530 flags |= GUP_FLAGS_FORCE;
1532 return __get_user_pages_uprobe(tsk, mm,
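/*
 * Read-only lookup sketch mirroring the calls made from get_kprobe() above
 * (error handling trimmed; uaddr is an illustrative user address): pin one
 * page of the traced task and peek at it.
 *
 *	struct page *page = NULL;
 *	void *vaddr;
 *
 *	if (get_user_pages_uprobe(task, task->active_mm,
 *				  uaddr, 1, 0, 0, &page, NULL) <= 0)
 *		return -EFAULT;
 *	vaddr = page_address(page);	// or kmap()/kmap_atomic() for highmem
 *	...
 *	page_cache_release(page);
 */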
1538 access_process_vm_atomic (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
1542 struct mm_struct *mm;
1543 struct vm_area_struct *vma;
1544 void *old_buf = buf;
1546 mm = get_task_mm(tsk);
1550 down_read(&mm->mmap_sem);
1551 /* ignore errors, just check how much was successfully transferred */
1553 int bytes, ret, offset;
1555 struct page *page = NULL;
1557 ret = get_user_pages_uprobe(tsk, mm, addr, 1,
1558 write, 1, &page, &vma);
1561 * Check if this is a VM_IO | VM_PFNMAP VMA, which
1562 * we can access using slightly different code.
1564 #ifdef CONFIG_HAVE_IOREMAP_PROT
1565 vma = find_vma(mm, addr);
1568 if (vma->vm_ops && vma->vm_ops->access)
1569 ret = vma->vm_ops->access(vma, addr, buf,
1577 offset = addr & (PAGE_SIZE-1);
1578 if (bytes > PAGE_SIZE-offset)
1579 bytes = PAGE_SIZE-offset;
1583 copy_to_user_page(vma, page, addr,
1584 maddr + offset, buf, bytes);
1585 set_page_dirty_lock(page);
1587 copy_from_user_page(vma, page, addr,
1588 buf, maddr + offset, bytes);
1591 page_cache_release(page);
1597 up_read(&mm->mmap_sem);
1600 return buf - old_buf;
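/*
 * Usage sketch: access_process_vm_atomic() copies data to or from another
 * task's address space and returns the number of bytes transferred, so a
 * short read of a traced process looks like (uaddr is illustrative):
 *
 *	unsigned long val;
 *
 *	if (access_process_vm_atomic(task, uaddr, &val, sizeof(val), 0)
 *	    != sizeof(val))
 *		return -EFAULT;
 */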
1604 #ifdef CONFIG_DEBUG_FS
1605 const char *(*__real_kallsyms_lookup) (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf);
1607 kallsyms_lookup (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf)
1609 return __real_kallsyms_lookup (addr, symbolsize, offset, modname, namebuf);
1612 static void __kprobes
1613 report_probe (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
1617 if (p->pre_handler == pre_handler_kretprobe)
1622 else if (p->pre_handler == setjmp_pre_handler)
1632 seq_printf (pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type, sym, offset, (modname ? modname : " "));
1634 seq_printf (pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
1637 static void __kprobes *
1638 kprobe_seq_start (struct seq_file *f, loff_t * pos)
1640 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
1643 static void __kprobes *
1644 kprobe_seq_next (struct seq_file *f, void *v, loff_t * pos)
1647 if (*pos >= KPROBE_TABLE_SIZE)
1652 static void __kprobes
1653 kprobe_seq_stop (struct seq_file *f, void *v)
1662 struct jprobe jprobe;
1663 struct kretprobe retprobe;
1664 unsigned long offset;
1667 static int __kprobes
1668 show_kprobe_addr (struct seq_file *pi, void *v)
1670 struct hlist_head *head;
1671 struct hlist_node *node;
1672 struct kprobe *p, *kp;
1673 const char *sym = NULL;
1674 unsigned int i = *(loff_t *) v;
1675 unsigned long size, offset = 0;
1676 char *modname, namebuf[128];
1678 head = &kprobe_table[i];
1680 hlist_for_each_entry_rcu (p, node, head, hlist)
1683 struct us_proc_ip *up = NULL;
1684 if (p->pre_handler == pre_handler_kretprobe){
1685 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1686 up = container_of(rp, struct us_proc_ip, retprobe);
1688 else {//if (p->pre_handler == setjmp_pre_handler){
1689 struct jprobe *jp = container_of(p, struct jprobe, kp);
1690 up = container_of(jp, struct us_proc_ip, jprobe);
1694 printk("show_kprobe_addr: %s\n", sym);
1698 sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
1699 if (p->pre_handler == aggr_pre_handler)
1701 list_for_each_entry_rcu (kp, &p->list, list) report_probe (pi, kp, sym, offset, modname);
1704 report_probe (pi, p, sym, offset, modname);
1706 //seq_printf (pi, "handled exceptions %lu\n", handled_exceptions);
1711 static struct seq_operations kprobes_seq_ops = {
1712 .start = kprobe_seq_start,
1713 .next = kprobe_seq_next,
1714 .stop = kprobe_seq_stop,
1715 .show = show_kprobe_addr
1718 static int __kprobes
1719 kprobes_open (struct inode *inode, struct file *filp)
1721 return seq_open (filp, &kprobes_seq_ops);
1724 static struct file_operations debugfs_kprobes_operations = {
1725 .open = kprobes_open,
1727 .llseek = seq_lseek,
1728 .release = seq_release,
1731 #ifdef KPROBES_PROFILE
1732 extern unsigned long nCount;
1733 extern struct timeval probe_enter_diff_sum;
1734 static void __kprobes *
1735 kprobe_prof_seq_start (struct seq_file *f, loff_t * pos)
1737 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
1740 static void __kprobes *
1741 kprobe_prof_seq_next (struct seq_file *f, void *v, loff_t * pos)
1744 if (*pos >= KPROBE_TABLE_SIZE)
1749 static void __kprobes
1750 kprobe_prof_seq_stop (struct seq_file *f, void *v)
1754 static void __kprobes
1755 report_probe_prof (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
1759 if (p->pre_handler == pre_handler_kretprobe)
1764 else if (p->pre_handler == setjmp_pre_handler)
1775 seq_printf (pi, "%p %s %s+0x%x %s %lu.%06ld\n", p->addr, kprobe_type,
1776 sym, offset, (modname ? modname : " "), p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
1779 seq_printf (pi, "%p %s %p %lu.%06ld\n", p->addr, kprobe_type, p->addr, p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
1782 static int __kprobes
1783 show_kprobe_prof (struct seq_file *pi, void *v)
1785 struct hlist_head *head;
1786 struct hlist_node *node;
1787 struct kprobe *p; //, *kp;
1788 const char *sym = NULL;
1789 unsigned int i = *(loff_t *) v;
1790 unsigned long size, offset = 0;
1791 char *modname, namebuf[128];
1792 static struct timeval utv, ktv;
1793 static unsigned long ucount, kcount;
1795 head = &kprobe_table[i];
1797 hlist_for_each_entry_rcu (p, node, head, hlist)
1799 sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
1800 /*if (p->pre_handler == aggr_pre_handler) {
1801 list_for_each_entry_rcu(kp, &p->list, list)
1802 report_probe_prof(pi, kp, sym, offset, modname);
1804 report_probe_prof (pi, p, sym, offset, modname);
1809 set_normalized_timeval (&utv, utv.tv_sec + p->hnd_tm_sum.tv_sec, utv.tv_usec + p->hnd_tm_sum.tv_usec);
1814 //seq_printf(pi, "kernel probe handling %lu %lu.%06ld\n",
1815 // p->count, p->hnd_tm_sum.tv_sec, p->hnd_tm_sum.tv_usec);
1816 //seq_printf(pi, "kernel probe handling2 %lu %lu.%06ld\n",
1817 // kcount, ktv.tv_sec, ktv.tv_usec);
1818 set_normalized_timeval (&ktv, ktv.tv_sec + p->hnd_tm_sum.tv_sec, ktv.tv_usec + p->hnd_tm_sum.tv_usec);
1820 //seq_printf(pi, "kernel probe handling3 %lu %lu.%06ld\n",
1821 // kcount, ktv.tv_sec, ktv.tv_usec);
1825 if (i == (KPROBE_TABLE_SIZE - 1))
1827 seq_printf (pi, "Average kernel probe handling %lu.%06ld\n", kcount ? ktv.tv_sec / kcount : 0, kcount ? ktv.tv_usec / kcount : 0);
1828 seq_printf (pi, "Average user probe handling %lu.%06ld\n", ucount ? utv.tv_sec / ucount : 0, ucount ? utv.tv_usec / ucount : 0);
1829 seq_printf (pi, "Average probe period %lu.%06ld\n", nCount ? probe_enter_diff_sum.tv_sec / nCount : 0, nCount ? probe_enter_diff_sum.tv_usec / nCount : 0);
1830 utv.tv_sec = utv.tv_usec = ktv.tv_sec = ktv.tv_usec = 0;
1831 ucount = kcount = 0;
1837 static struct seq_operations kprobes_prof_seq_ops = {
1838 .start = kprobe_prof_seq_start,
1839 .next = kprobe_prof_seq_next,
1840 .stop = kprobe_prof_seq_stop,
1841 .show = show_kprobe_prof
1844 static int __kprobes
1845 kprobes_prof_open (struct inode *inode, struct file *filp)
1847 return seq_open (filp, &kprobes_prof_seq_ops);
1850 static struct file_operations debugfs_kprobes_prof_operations = {
1851 .open = kprobes_prof_open,
1853 .llseek = seq_lseek,
1854 .release = seq_release,
1858 int __kprobes debugfs_kprobe_init (void);
1859 static struct dentry *dbg_dir, *dbg_file;
1860 #ifdef KPROBES_PROFILE
1861 static struct dentry *dbg_file_prof;
1865 debugfs_kprobe_init (void)
1867 //struct dentry *dir, *file;
1869 dbg_dir = debugfs_create_dir ("kprobes", NULL);
1873 dbg_file = debugfs_create_file ("list", 0444, dbg_dir, 0, &debugfs_kprobes_operations);
1876 debugfs_remove (dbg_dir);
1881 #ifdef KPROBES_PROFILE
1882 dbg_file_prof = debugfs_create_file ("prof", 0444, dbg_dir, 0, &debugfs_kprobes_prof_operations);
1885 debugfs_remove (dbg_file);
1886 debugfs_remove (dbg_dir);
1894 //late_initcall(debugfs_kprobe_init);
1895 extern unsigned long (*kallsyms_search) (const char *name);
1896 #endif /* CONFIG_DEBUG_FS */
1898 #if defined(CONFIG_X86)
1899 static struct notifier_block kprobe_exceptions_nb = {
1900 .notifier_call = kprobe_exceptions_notify,
1910 /* FIXME allocate the probe table, currently defined statically */
1911 /* initialize all list heads */
1912 for (i = 0; i < KPROBE_TABLE_SIZE; i++)
1914 INIT_HLIST_HEAD (&kprobe_table[i]);
1915 INIT_HLIST_HEAD (&kretprobe_inst_table[i]);
1916 INIT_HLIST_HEAD (&uprobe_insn_slot_table[i]);
1918 atomic_set (&kprobe_count, 0);
1920 err = arch_init_kprobes ();
1922 DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);
1923 #if defined(CONFIG_X86)
1925 err = register_die_notifier (&kprobe_exceptions_nb);
1926 DBPRINTF ("init_kprobes: register_die_notifier - %d", err);
1927 #endif // CONFIG_X86
1929 #ifdef CONFIG_DEBUG_FS
1932 __real_kallsyms_lookup = (void *) kallsyms_search ("kallsyms_lookup");
1933 if (!__real_kallsyms_lookup)
1935 DBPRINTF ("kallsyms_lookup is not found! Oops. Where is the kernel?");
1938 err = debugfs_kprobe_init ();
1939 DBPRINTF ("init_kprobes: debugfs_kprobe_init - %d", err);
1941 #endif /* CONFIG_DEBUG_FS */
1949 #ifdef CONFIG_DEBUG_FS
1950 #ifdef KPROBES_PROFILE
1952 debugfs_remove (dbg_file_prof);
1955 debugfs_remove (dbg_file);
1957 debugfs_remove (dbg_dir);
1958 #endif /* CONFIG_DEBUG_FS */
1960 #if defined(CONFIG_X86)
1961 unregister_die_notifier (&kprobe_exceptions_nb);
1962 #endif // CONFIG_X86
1963 arch_exit_kprobes ();
1966 module_init (init_kprobes);
1967 module_exit (exit_kprobes);
1969 EXPORT_SYMBOL_GPL (register_kprobe);
1970 EXPORT_SYMBOL_GPL (unregister_kprobe);
1971 EXPORT_SYMBOL_GPL (register_jprobe);
1972 EXPORT_SYMBOL_GPL (unregister_jprobe);
1973 EXPORT_SYMBOL_GPL (register_ujprobe);
1974 EXPORT_SYMBOL_GPL (unregister_ujprobe);
1975 EXPORT_SYMBOL_GPL (jprobe_return);
1976 EXPORT_SYMBOL_GPL (uprobe_return);
1977 EXPORT_SYMBOL_GPL (register_kretprobe);
1978 EXPORT_SYMBOL_GPL (unregister_kretprobe);
1979 EXPORT_SYMBOL_GPL (register_uretprobe);
1980 EXPORT_SYMBOL_GPL (unregister_uretprobe);
1981 EXPORT_SYMBOL_GPL (unregister_all_uprobes);
1982 EXPORT_SYMBOL_GPL (access_process_vm_atomic);
1983 #if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,23)
1984 EXPORT_SYMBOL_GPL (access_process_vm);
1986 #ifdef KERNEL_HAS_ISPAGEPRESENT
1987 EXPORT_SYMBOL_GPL (is_page_present);
1989 EXPORT_SYMBOL_GPL (page_present);