4 #include <linux/version.h>
5 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6 #include <linux/config.h>
11 #include <linux/hash.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/moduleloader.h>
16 #include <linux/kallsyms.h>
17 //#include <linux/freezer.h>
18 #include <linux/seq_file.h>
19 #ifdef CONFIG_DEBUG_FS
20 #include <linux/debugfs.h>
22 #include <asm-generic/sections.h>
23 #include <asm/cacheflush.h>
24 #include <asm/errno.h>
25 #include <linux/spinlock.h>
26 #include <linux/version.h>
27 #include <linux/highmem.h> // kmap_atomic, kunmap_atomic, copy_from_user_page, copy_to_user_page
28 #include <linux/pagemap.h> // page_cache_release
29 #include <linux/vmalloc.h> // vmalloc, vfree
30 #if defined(CONFIG_X86)
31 #include <linux/kdebug.h> // register_die_notifier, unregister_die_notifier
33 #include <linux/hugetlb.h> // follow_hugetlb_page, is_vm_hugetlb_page
37 //#define arch_remove_kprobe(p) do { } while (0)
/* Serializes registration on the panic notifier chain below.
 * NOTE(review): SPIN_LOCK_UNLOCKED is a pre-2.6.19 static initializer. */
44 static spinlock_t die_notifier_lock = SPIN_LOCK_UNLOCKED;
/*
 * Register @nb on the kernel's panic_notifier_list, picking the notifier
 * API that matches the running kernel (atomic chain API on >= 2.6.17,
 * plain notifier_chain_register() before that), under die_notifier_lock.
 * NOTE(review): the listing elides the declarations of 'flags'/'err',
 * the #else/#endif and the return statement — confirm against full file.
 */
46 int src_register_die_notifier(struct notifier_block *nb)
51 spin_lock_irqsave(&die_notifier_lock, flags);
52 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
53 err = atomic_notifier_chain_register(&panic_notifier_list, nb);
55 err = notifier_chain_register(&panic_notifier_list, nb);
57 spin_unlock_irqrestore(&die_notifier_lock, flags);
/*
 * Local copy of the kernel's hlist_replace_rcu() (present only in newer
 * kernels): splice @new into the hash list in @old's position and poison
 * @old->pprev so stale use of the removed node faults loudly.
 */
63 * hlist_replace_rcu - replace old entry by new one
64 * @old : the element to be replaced
65 * @new : the new element to insert
67 * The @old entry will be replaced with the @new entry atomically.
70 src_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new)
72 struct hlist_node *next = old->next;
75 new->pprev = old->pprev;
/* Back-link the successor (if any) to @new's next pointer. */
78 new->next->pprev = &new->next;
/* Poison the removed node; readers holding it will crash deterministically. */
81 old->pprev = LIST_POISON2;
/* 2^6 = 64 hash buckets for each of the probe tables below. */
84 #define KPROBE_HASH_BITS 6
85 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
89 * Some oddball architectures like 64bit powerpc have function descriptors
90 * so this must be overridable.
92 #ifndef kprobe_lookup_name
93 #define kprobe_lookup_name(name, addr) \
94 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
/* Probes hashed by probed address; kretprobe instances hashed by task;
 * uprobes additionally hashed by their out-of-line instruction slot. */
97 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
98 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
99 static struct hlist_head uprobe_insn_slot_table[KPROBE_TABLE_SIZE];
100 static atomic_t kprobe_count;
102 //DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
103 DEFINE_SPINLOCK (kretprobe_lock); /* Protects kretprobe_inst_table */
/* Per-CPU pointer to the kprobe currently being handled on this CPU. */
104 static DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL;
105 unsigned long handled_exceptions;
107 /* We have preemption disabled.. so it is safe to use __ versions */
/* Record @kp as the probe being serviced on this CPU (read back by the
 * aggregate fault/break handlers to dispatch to the right probe). */
109 set_kprobe_instance (struct kprobe *kp)
111 __get_cpu_var (kprobe_instance) = kp;
/* Clear the per-CPU "probe in progress" marker. */
115 reset_kprobe_instance (void)
117 __get_cpu_var (kprobe_instance) = NULL;
/*
 * Look up the probe registered at @addr. @tgid == 0 means a kernel probe;
 * a non-zero @tgid selects a user-space probe for that process. For user
 * probes the physical page behind @addr (resolved via @ctask) is compared
 * so the "same" probe can be found from another process sharing the page.
 * NOTE(review): many lines (locals 'paddr'/'nCount', braces, #endif,
 * return) are elided from this listing — verify against the full file.
 */
121 * This routine is called either:
122 * - under the kprobe_mutex - during kprobe_[un]register()
124 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
126 struct kprobe __kprobes *
127 get_kprobe (void *addr, int tgid, struct task_struct *ctask)
129 struct hlist_head *head;
130 struct hlist_node *node;
131 struct kprobe *p, *retVal = NULL;
132 int ret = 0, uprobe_found;
133 struct page *page = 0, *tpage = 0;
134 struct vm_area_struct *vma = 0;
135 struct task_struct *task = 0;
/* Resolve the caller-side page so probes in other address spaces that map
 * the same physical page can be matched below. */
139 if (ctask && ctask->active_mm)
141 ret = get_user_pages_atomic (ctask, ctask->active_mm, (unsigned long) addr, 1, 0, 0, &tpage, NULL);
143 DBPRINTF ("get_user_pages for task %d at %p failed!", current->pid, addr);
146 paddr = page_address (tpage);
147 page_cache_release (tpage);
151 // DBPRINTF("task %d has no mm!", ctask->pid);
153 //TODO: test - two processes invokes instrumented function
154 head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
155 hlist_for_each_entry_rcu (p, node, head, hlist)
157 //if looking for kernel probe and this is kernel probe with the same addr OR
158 //if looking for the user space probe and this is user space probe probe with the same addr and pid
159 DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->addr, tgid, p->tgid);
165 if (!tgid || uprobe_found)
169 DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
171 DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
/* Different tgid: may still be the same probe seen through another
 * process's mapping — compare page offsets, then physical pages. */
175 else if (tgid != p->tgid)
177 // if looking for the user space probe and this is user space probe
178 // with another addr and pid but with the same offset whithin the page
179 // it could be that it is the same probe (with address from other user space)
180 // we should handle it as usual probe but without notification to user
181 if (paddr && tgid && (((unsigned long) addr & ~PAGE_MASK) == ((unsigned long) p->addr & ~PAGE_MASK))
184 DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d. possibly for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
185 // this probe has the same offset in the page
186 // look in the probes for the other pids
187 // get page for user space probe addr
/* Kernel-version split: pre-2.6.26 find_task_by_pid(), later pid_task(). */
189 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
190 task = find_task_by_pid (p->tgid);
191 #else //lif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
192 task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
195 get_task_struct (task);
199 DBPRINTF ("task for pid %d not found! Dead probe?", p->tgid);
204 if (page_present (task->active_mm, (unsigned long) p->addr))
206 ret = get_user_pages_atomic (task, task->active_mm, (unsigned long) p->addr, 1, 0, 0, &page, &vma);
208 DBPRINTF ("get_user_pages for task %d at %p failed!", p->tgid, p->addr);
215 DBPRINTF ("task %d has no mm!", task->pid);
218 put_task_struct (task);
/* Same physical page => same probe in another address space.
 * NOTE(review): this path panics — deliberate "should not happen" trap. */
221 if (paddr == page_address (page))
223 retVal = p; // we found the probe in other process address space
224 DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
225 panic ("user space probe from another process");
227 page_cache_release (page);
234 DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
/*
 * Find the uprobe whose out-of-line single-step slot is @addr, by walking
 * uprobe_insn_slot_table (keyed on p->ainsn.insn, linked via p->is_hlist).
 * Used to map a trap taken inside an instruction slot back to its probe.
 * NOTE(review): locals ('uprobe_found', 'nCount'), braces and the return
 * are elided from this listing.
 */
238 struct kprobe __kprobes *
239 get_kprobe_by_insn_slot (void *addr, int tgid, struct task_struct *ctask)
241 struct hlist_head *head;
242 struct hlist_node *node;
243 struct kprobe *p, *retVal = NULL;
246 //TODO: test - two processes invokes instrumented function
247 head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
248 hlist_for_each_entry_rcu (p, node, head, is_hlist)
250 //if looking for kernel probe and this is kernel probe with the same addr OR
251 //if looking for the user space probe and this is user space probe probe with the same addr and pid
252 DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->ainsn.insn, tgid, p->tgid);
/* Match on the slot address, not the probed address. */
253 if (p->ainsn.insn == addr)
258 if (!tgid || uprobe_found)
262 DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
264 DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
270 DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
/*
 * Pre-handler installed on an "aggregate" kprobe (several kprobes sharing
 * one address): invoke each listed probe's pre_handler in turn, marking it
 * as the in-flight probe on this CPU around the call.
 */
275 * Aggregate handlers for multiple kprobes support - these handlers
276 * take care of invoking the individual kprobe handlers on p->list
279 aggr_pre_handler (struct kprobe *p, struct pt_regs *regs /*,
280 struct vm_area_struct **vma,
281 struct page **page, unsigned long **kaddr */ )
286 list_for_each_entry_rcu (kp, &p->list, list)
290 set_kprobe_instance (kp);
/* NOTE(review): elided lines likely propagate a non-zero 'ret' — confirm. */
291 ret = kp->pre_handler (kp, regs);
295 reset_kprobe_instance ();
/* Aggregate post-handler: run every child probe's post_handler (if set),
 * bracketed by set/reset of the per-CPU kprobe_instance marker. */
300 static void __kprobes
301 aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags)
305 list_for_each_entry_rcu (kp, &p->list, list)
307 if (kp->post_handler)
309 set_kprobe_instance (kp);
310 kp->post_handler (kp, regs, flags);
311 reset_kprobe_instance ();
/* Aggregate fault handler: delegate to the fault_handler of the probe
 * whose pre/post handler was executing when the fault hit (tracked via
 * the per-CPU kprobe_instance). */
319 aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr)
321 struct kprobe *cur = __get_cpu_var (kprobe_instance);
324 * if we faulted "during" the execution of a user specified
325 * probe handler, invoke just that probe's fault handler
327 if (cur && cur->fault_handler)
329 if (cur->fault_handler (cur, regs, trapnr))
/* Aggregate break handler: forward the breakpoint to the in-flight probe's
 * break_handler (jprobe longjmp path), then clear the per-CPU marker. */
337 aggr_break_handler (struct kprobe *p, struct pt_regs *regs /*,
338 struct vm_area_struct **vma,
339 struct page **page, unsigned long **kaddr */ )
341 struct kprobe *cur = __get_cpu_var (kprobe_instance);
343 DBPRINTF ("cur = 0x%p\n", cur);
345 DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);
347 if (cur && cur->break_handler)
349 if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
352 reset_kprobe_instance ();
356 /* Walks the list and increments nmissed count for multiprobe case */
/* For an aggregate probe, every child's nmissed is bumped; otherwise the
 * elided branch presumably bumps p->nmissed directly — confirm in full file. */
358 kprobes_inc_nmissed_count (struct kprobe *p)
361 if (p->pre_handler != aggr_pre_handler)
367 list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++;
372 /* Called with kretprobe_lock held */
/* Return the first unused kretprobe_instance from rp->free_instances,
 * or NULL when the pool is exhausted (elided return path). */
373 struct kretprobe_instance __kprobes *
374 get_free_rp_inst (struct kretprobe *rp)
376 struct hlist_node *node;
377 struct kretprobe_instance *ri;
378 hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
383 /* Called with kretprobe_lock held */
/* Return the first in-flight kretprobe_instance from rp->used_instances,
 * or NULL when none are active. */
384 static struct kretprobe_instance __kprobes *
385 get_used_rp_inst (struct kretprobe *rp)
387 struct hlist_node *node;
388 struct kretprobe_instance *ri;
389 hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
393 /* Called with kretprobe_lock held */
/* Move @ri from its kretprobe's free list to the active side: hash it by
 * owning task into kretprobe_inst_table and put it on rp->used_instances. */
395 add_rp_inst (struct kretprobe_instance *ri)
398 * Remove rp inst off the free list -
399 * Add it back when probed function returns
401 hlist_del (&ri->uflist);
403 /* Add rp inst onto table */
404 INIT_HLIST_NODE (&ri->hlist);
405 hlist_add_head (&ri->hlist, &kretprobe_inst_table[hash_ptr (ri->task, KPROBE_HASH_BITS)]);
407 /* Also add this rp inst to the used list. */
408 INIT_HLIST_NODE (&ri->uflist);
409 hlist_add_head (&ri->uflist, &ri->rp->used_instances);
412 /* Called with kretprobe_lock held */
/* Retire @ri after the probed function returned: unhash it, and either
 * return it to its kretprobe's free list or (elided branch, when the
 * kretprobe is gone) collect it on @head for later freeing. */
414 recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
416 /* remove rp inst off the rprobe_inst_table */
417 hlist_del (&ri->hlist);
420 /* remove rp inst off the used list */
421 hlist_del (&ri->uflist);
422 /* put rp inst back onto the free list */
423 INIT_HLIST_NODE (&ri->uflist);
424 hlist_add_head (&ri->uflist, &ri->rp->free_instances);
/* Orphaned instance: queue on caller-supplied list instead. */
428 hlist_add_head (&ri->hlist, head);
/* Hash bucket holding the active kretprobe instances of task @tsk. */
431 struct hlist_head __kprobes *
432 kretprobe_inst_table_head (struct task_struct *tsk)
434 return &kretprobe_inst_table[hash_ptr (tsk, KPROBE_HASH_BITS)];
438 * This function is called from finish_task_switch when task tk becomes dead,
439 * so that we can recycle any function-return probe instances associated
440 * with this task. These left over instances represent probed functions
441 * that have been called but will never return.
/* NOTE(review): kprobe_flush_task below is commented out in this port —
 * dead-task recycling is presumably handled elsewhere; consider removing. */
443 /*void __kprobes kprobe_flush_task(struct task_struct *tk)
445 struct kretprobe_instance *ri;
446 struct hlist_head *head, empty_rp;
447 struct hlist_node *node, *tmp;
448 unsigned long flags = 0;
450 INIT_HLIST_HEAD(&empty_rp);
451 spin_lock_irqsave(&kretprobe_lock, flags);
452 head = kretprobe_inst_table_head(tk);
453 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
455 recycle_rp_inst(ri, &empty_rp);
457 spin_unlock_irqrestore(&kretprobe_lock, flags);
459 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
460 hlist_del(&ri->hlist);
/* Drain and free every instance on rp->free_instances (kfree is in an
 * elided line — confirm in full file). */
466 free_rp_inst (struct kretprobe *rp)
468 struct kretprobe_instance *ri;
469 while ((ri = get_free_rp_inst (rp)) != NULL)
471 hlist_del (&ri->uflist);
/* Copy the arch-level probe state (saved opcode, decoded insn, owning
 * tgid, single-step address) from @old_p into @p so an aggregated child
 * probe stays consistent with the probe already armed at the address. */
477 * Keep all fields in the kprobe consistent
480 copy_kprobe (struct kprobe *old_p, struct kprobe *p)
482 memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t));
483 memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn));
484 p->tgid = old_p->tgid;
485 p->ss_addr = old_p->ss_addr;
486 //p->spid = old_p->spid;
/* Attach @p to aggregate probe @old_p. A second break_handler (jprobe) at
 * the same address is rejected (elided error return); otherwise the
 * aggregate inherits break/post dispatch as needed. */
490 * Add the new probe to old_p->list. Fail if this is the
491 * second jprobe at the address - two jprobes can't coexist
494 add_new_kprobe (struct kprobe *old_p, struct kprobe *p)
496 if (p->break_handler)
498 if (old_p->break_handler)
500 list_add_tail_rcu (&p->list, &old_p->list);
501 old_p->break_handler = aggr_break_handler;
504 list_add_rcu (&p->list, &old_p->list);
505 if (p->post_handler && !old_p->post_handler)
506 old_p->post_handler = aggr_post_handler;
/* Turn @ap into the "manager" probe for the address: install the aggregate
 * dispatch handlers, seed its child list with existing probe @p, and swap
 * @ap into @p's place in the hash table (RCU-safe replace). */
511 * Fill in the required fields of the "manager kprobe". Replace the
512 * earlier kprobe in the hlist with the manager kprobe
515 add_aggr_kprobe (struct kprobe *ap, struct kprobe *p)
518 flush_insn_slot (ap);
520 ap->pre_handler = aggr_pre_handler;
521 ap->fault_handler = aggr_fault_handler;
523 ap->post_handler = aggr_post_handler;
/* break_handler only when the existing probe had one (jprobe). */
524 if (p->break_handler)
525 ap->break_handler = aggr_break_handler;
527 INIT_LIST_HEAD (&ap->list);
528 list_add_rcu (&p->list, &ap->list);
530 src_hlist_replace_rcu (&p->hlist, &ap->hlist);
/*
 * Register @p at an address that already has probe @old_p. If @old_p is
 * already an aggregate, just append @p; otherwise allocate a fresh manager
 * probe, promote @old_p under it, then append @p.
 * NOTE(review): allocation-failure handling and return are in elided lines.
 */
534 * This is the second or subsequent kprobe at the address - handle
538 register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p)
542 DBPRINTF ("start\n");
544 DBPRINTF ("p = %p old_p = %p \n", p, old_p);
545 if (old_p->pre_handler == aggr_pre_handler)
547 DBPRINTF ("aggr_pre_handler \n");
549 copy_kprobe (old_p, p);
550 ret = add_new_kprobe (old_p, p);
554 DBPRINTF ("kzalloc\n");
/* Kernel-version split: kzalloc vs kmalloc+memset (elided #if/#else). */
557 ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL);
559 ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL);
561 memset (ap, 0, sizeof (struct kprobe));
565 add_aggr_kprobe (ap, old_p);
567 DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
568 ret = add_new_kprobe (ap, p);
/*
 * Core kernel-probe registration: resolve symbol_name+offset to an
 * address, take a module reference if probing another module, then either
 * aggregate with an existing probe at that address or arch-prepare the
 * probe and hash it into kprobe_table.
 * NOTE(review): locals ('ret', 'probed_mod'), error paths, arming call and
 * return are in elided lines — confirm against the full file.
 */
574 __register_kprobe (struct kprobe *p, unsigned long called_from, int atomic)
576 struct kprobe *old_p;
577 // struct module *probed_mod;
580 * If we have a symbol_name argument look it up,
581 * and add it to the address. That way the addr
582 * field can either be global or relative to a symbol.
588 kprobe_lookup_name (p->symbol_name, p->addr);
593 DBPRINTF ("p->addr = 0x%p\n", p->addr);
594 p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset);
595 DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
597 /* if ((!kernel_text_address((unsigned long) p->addr)) ||
598 in_kprobes_functions((unsigned long) p->addr))
601 #ifdef KPROBES_PROFILE
602 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
603 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
606 p->mod_refcounted = 0;
613 // Check are we probing a module
614 if ((probed_mod = module_text_address((unsigned long) p->addr))) {
615 struct module *calling_mod = module_text_address(called_from);
616 // We must allow modules to probe themself and
617 // in this case avoid incrementing the module refcount,
618 // so as to allow unloading of self probing modules.
620 if (calling_mod && (calling_mod != probed_mod)) {
621 if (unlikely(!try_module_get(probed_mod)))
/* Remember we pinned the module so unregister can drop the reference. */
623 p->mod_refcounted = 1;
629 // mutex_lock(&kprobe_mutex);
/* tgid 0 => kernel probe lookup. */
630 old_p = get_kprobe (p->addr, 0, NULL);
633 ret = register_aggr_kprobe (old_p, p);
635 atomic_inc (&kprobe_count);
639 if ((ret = arch_prepare_kprobe (p)) != 0)
642 DBPRINTF ("before out ret = 0x%x\n", ret);
644 INIT_HLIST_NODE (&p->hlist);
645 hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
647 /* if (atomic_add_return(1, &kprobe_count) == \
648 (ARCH_INACTIVE_KPROBE_COUNT + 1))
649 register_page_fault_notifier(&kprobe_page_fault_nb);*/
654 // mutex_unlock(&kprobe_mutex);
/* On failure, release the module reference taken above. */
656 if (ret && probed_mod)
657 module_put(probed_mod);
659 DBPRINTF ("out ret = 0x%x\n", ret);
/*
 * Core user-space probe registration for @task: aggregate with an existing
 * probe at (addr, tgid) if present, otherwise arch-prepare the uprobe
 * (copies the original insn to a slot), hash it into both kprobe_table and
 * uprobe_insn_slot_table, and arm it in the target process.
 * NOTE(review): locals, error paths and return are in elided lines.
 */
665 __register_uprobe (struct kprobe *p, struct task_struct *task, int atomic, unsigned long called_from)
668 struct kprobe *old_p;
673 DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
675 p->mod_refcounted = 0;
677 #ifdef KPROBES_PROFILE
678 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
679 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
683 // get the first item
684 old_p = get_kprobe (p->addr, p->tgid, NULL);
687 ret = register_aggr_kprobe (old_p, p);
689 atomic_inc (&kprobe_count);
692 if ((ret = arch_prepare_uprobe (p, task, atomic)) != 0)
697 DBPRINTF ("before out ret = 0x%x\n", ret);
699 INIT_HLIST_NODE (&p->hlist);
700 hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
/* Second index: slot address -> probe, used by get_kprobe_by_insn_slot(). */
702 INIT_HLIST_NODE (&p->is_hlist);
703 hlist_add_head_rcu (&p->is_hlist, &uprobe_insn_slot_table[hash_ptr (p->ainsn.insn, KPROBE_HASH_BITS)]);
705 arch_arm_uprobe (p, task);
707 DBPRINTF ("out ret = 0x%x\n", ret);
/* Uprobe removal is a thin wrapper over the common unregister path. */
713 unregister_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
715 unregister_kprobe (p, task, atomic);
/* Public entry point; records the caller for module-refcount bookkeeping. */
719 register_kprobe (struct kprobe *p, int atomic)
721 return __register_kprobe (p, (unsigned long) __builtin_return_address (0), atomic);
/*
 * Common unregister path for kernel probes and uprobes (@task non-NULL
 * implies uprobe; 'pid' derivation is in an elided line). Validates @p
 * against the registered entry, disarms and unhashes when it is the last
 * probe at the address, otherwise just unlinks it from the aggregate and
 * demotes the aggregate's break/post handlers if no child needs them.
 * NOTE(review): several branches/locals are elided — verify ordering in
 * the full file before changing anything here.
 */
725 unregister_kprobe (struct kprobe *p, struct task_struct *task, int atomic)
727 // struct module *mod;
728 struct kprobe *old_p, *list_p;
729 int cleanup_p, pid = 0;
731 // mutex_lock(&kprobe_mutex);
735 old_p = get_kprobe (p->addr, pid, NULL);
736 DBPRINTF ("unregister_kprobe p=%p old_p=%p", p, old_p);
737 if (unlikely (!old_p))
739 // mutex_unlock(&kprobe_mutex);
/* If old_p is an aggregate, confirm @p really is one of its children. */
744 list_for_each_entry_rcu (list_p, &old_p->list, list)
746 /* kprobe p is a valid probe */
748 // mutex_unlock(&kprobe_mutex);
752 DBPRINTF ("unregister_kprobe valid_p");
/* Last probe at this address (either standalone, or sole child of the
 * aggregate) => disarm and remove from the hash table. */
753 if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
754 (p->list.next == &old_p->list) && (p->list.prev == &old_p->list)))
756 /* Only probe on the hash list */
757 DBPRINTF ("unregister_kprobe disarm pid=%d", pid);
759 arch_disarm_uprobe (p, task);//vma, page, kaddr);
761 arch_disarm_kprobe (p);
762 hlist_del_rcu (&old_p->hlist);
767 list_del_rcu (&p->list);
770 DBPRINTF ("unregister_kprobe cleanup_p=%d", cleanup_p);
771 // mutex_unlock(&kprobe_mutex);
773 // synchronize_sched();
/* Drop the module reference taken at registration time. */
775 if (p->mod_refcounted &&
776 (mod = module_text_address((unsigned long)p->addr)))
783 list_del_rcu (&p->list);
786 arch_remove_kprobe (p, task);
790 /// mutex_lock(&kprobe_mutex);
/* Not the last probe: strip aggregate handlers no remaining child uses. */
791 if (p->break_handler)
792 old_p->break_handler = NULL;
795 list_for_each_entry_rcu (list_p, &old_p->list, list)
797 if (list_p->post_handler)
804 old_p->post_handler = NULL;
806 // mutex_unlock(&kprobe_mutex);
809 /* Call unregister_page_fault_notifier()
810 * if no probes are active
812 // mutex_lock(&kprobe_mutex);
813 /* if (atomic_add_return(-1, &kprobe_count) == \
814 ARCH_INACTIVE_KPROBE_COUNT)
815 unregister_page_fault_notifier(&kprobe_page_fault_nb);*/
816 // mutex_unlock(&kprobe_mutex);
/* User-space jprobe: reuse the kprobe machinery with the setjmp/longjmp
 * handler pair, registered against @task's address space. */
821 register_ujprobe (struct task_struct *task, struct mm_struct *mm, struct jprobe *jp, int atomic)
827 /* Todo: Verify probepoint is a function entry point */
828 jp->kp.pre_handler = setjmp_pre_handler;
829 jp->kp.break_handler = longjmp_break_handler;
831 ret = __register_uprobe (&jp->kp, task, atomic,
832 (unsigned long) __builtin_return_address (0));
841 unregister_ujprobe (struct task_struct *task, struct jprobe *jp, int atomic)
843 unregister_uprobe (&jp->kp, task, atomic);
/* Kernel jprobe: same handler pair through the kernel-probe path. */
847 register_jprobe (struct jprobe *jp, int atomic)
849 /* Todo: Verify probepoint is a function entry point */
850 jp->kp.pre_handler = setjmp_pre_handler;
851 jp->kp.break_handler = longjmp_break_handler;
853 return __register_kprobe (&jp->kp, (unsigned long) __builtin_return_address (0), atomic);
857 unregister_jprobe (struct jprobe *jp, int atomic)
859 unregister_kprobe (&jp->kp, 0, atomic);
863 * This kprobe pre_handler is registered with every kretprobe. When probe
864 * hits it will set up the return probe.
/* Under kretprobe_lock, __arch_prepare_kretprobe() grabs a free instance
 * and swaps the return address so the return fires the retprobe. */
867 pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs /*, struct vm_area_struct **vma,
868 struct page **page, unsigned long **kaddr */ )
870 struct kretprobe *rp = container_of (p, struct kretprobe, kp);
871 unsigned long flags = 0;
872 DBPRINTF ("START\n");
874 /*TODO: consider to only swap the RA after the last pre_handler fired */
875 spin_lock_irqsave (&kretprobe_lock, flags);
877 __arch_prepare_kretprobe (rp, regs);
878 spin_unlock_irqrestore (&kretprobe_lock, flags);
/* Special-cased retprobe on the scheduler entry (see register_kretprobe). */
883 struct kretprobe *sched_rp;
/*
 * Register a kernel return probe: install pre_handler_kretprobe as the
 * entry hook, pre-allocate rp->maxactive instance records on the free
 * list, then register the underlying kprobe. A probe on 'sched_addr'
 * gets an oversized pool (many tasks can be inside schedule() at once).
 * NOTE(review): kmalloc-failure cleanup and return are in elided lines.
 */
886 register_kretprobe (struct kretprobe *rp, int atomic)
889 struct kretprobe_instance *inst;
893 rp->kp.pre_handler = pre_handler_kretprobe;
894 rp->kp.post_handler = NULL;
895 rp->kp.fault_handler = NULL;
896 rp->kp.break_handler = NULL;
900 /* Pre-allocate memory for max kretprobe instances */
901 if(rp->kp.addr == sched_addr)
902 rp->maxactive = 1000;//max (100, 2 * NR_CPUS);
903 else if (rp->maxactive <= 0)
905 #if 1//def CONFIG_PREEMPT
906 rp->maxactive = max (10, 2 * NR_CPUS);
908 rp->maxactive = NR_CPUS;
911 INIT_HLIST_HEAD (&rp->used_instances);
912 INIT_HLIST_HEAD (&rp->free_instances);
913 for (i = 0; i < rp->maxactive; i++)
915 inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
921 INIT_HLIST_NODE (&inst->uflist);
922 hlist_add_head (&inst->uflist, &rp->free_instances);
925 DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
927 /* Establish function entry probe point */
928 if ((ret = __register_kprobe (&rp->kp, (unsigned long) __builtin_return_address (0), atomic)) != 0)
931 DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
/* Remember the scheduler retprobe globally (elided assignment follows). */
932 if(rp->kp.addr == sched_addr)
/*
 * Tear down a kernel return probe: unregister the entry kprobe first (no
 * new instances can be queued), then under kretprobe_lock unhash every
 * still-active instance; freeing of the pool is in elided lines.
 */
939 unregister_kretprobe (struct kretprobe *rp, int atomic)
942 struct kretprobe_instance *ri;
944 //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
945 // *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
946 unregister_kprobe (&rp->kp, 0, atomic);
/* Clear the global scheduler-retprobe pointer (elided assignment). */
948 if(rp->kp.addr == sched_addr)
951 //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
952 // *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
954 spin_lock_irqsave (&kretprobe_lock, flags);
955 while ((ri = get_used_rp_inst (rp)) != NULL)
958 hlist_del (&ri->uflist);
960 spin_unlock_irqrestore (&kretprobe_lock, flags);
/*
 * Register a user-space return probe on @task/@mm: pre-allocate the
 * instance pool, pin and kmap the page(s) holding the probed instruction
 * (two pages when the second insn crosses a page boundary), arch-prepare
 * the exit trampoline, register the entry uprobe, arm the trampoline, and
 * finally mark dirty / kunmap / release the pinned pages.
 * NOTE(review): the pages/vmas/kaddrs locals appear only in the commented
 * block at the top and error paths are elided — their live declarations
 * must exist in elided lines; confirm before modifying.
 */
965 register_uretprobe (struct task_struct *task, struct mm_struct *mm, struct kretprobe *rp, int atomic)
968 struct kretprobe_instance *inst;
969 /*struct page *pages[2] = {0, 0};
970 struct vm_area_struct *vmas[2] = {0, 0};
971 unsigned long *kaddrs[2] = {0, 0}; */
977 DBPRINTF ("START\n");
979 rp->kp.pre_handler = pre_handler_kretprobe;
980 rp->kp.post_handler = NULL;
981 rp->kp.fault_handler = NULL;
982 rp->kp.break_handler = NULL;
986 /* Pre-allocate memory for max kretprobe instances */
987 if (rp->maxactive <= 0)
989 #if 1//def CONFIG_PREEMPT
990 rp->maxactive = max (10, 2 * NR_CPUS);
992 rp->maxactive = NR_CPUS;
995 INIT_HLIST_HEAD (&rp->used_instances);
996 INIT_HLIST_HEAD (&rp->free_instances);
997 for (i = 0; i < rp->maxactive; i++)
999 inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
1006 INIT_HLIST_NODE (&inst->uflist);
1007 hlist_add_head (&inst->uflist, &rp->free_instances);
/* Pin the page containing the probed address (atomic vs sleeping variant). */
1013 ret = get_user_pages_atomic (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1015 ret = get_user_pages (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1018 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
1023 kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1025 kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1026 // if 2nd instruction is on the 2nd page
1027 if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
1030 ret = get_user_pages_atomic (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1032 ret = get_user_pages (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1035 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
1040 kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1042 kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1046 // 2nd instruction is on the 1st page too
1048 pages[1] = pages[0];
1049 kaddrs[1] = kaddrs[0] + 1;
1052 /* Establish function exit probe point */
1053 if ((ret = arch_prepare_uretprobe (rp, task/*vmas, pages, kaddrs */ )) != 0)
1055 /* Establish function entry probe point */
1056 if ((ret = __register_uprobe (&rp->kp, task, atomic,
1057 (unsigned long) __builtin_return_address (0))) != 0)
1063 arch_arm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
1066 set_page_dirty (pages[1]);
1068 set_page_dirty_lock (pages[1]);
/* Unmap: subtract the in-page offset added above to recover the kmap base. */
1077 kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
1081 page_cache_release (pages[0]);
1083 if ((pages[0] != pages[1]))
1090 kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
1094 page_cache_release (pages[1]);
1097 /*else if( (pages[0] != pages[2]) ){
1100 if (atomic) kunmap_atomic(kaddrs[2], KM_USER1);
1101 else kunmap(pages[2]);
1103 page_cache_release(pages[2]);
/*
 * Duplicate @rp for deferred deletion (see unregister_uretprobe): the
 * clone takes over the probe point via the aggregate path so the original
 * can be handed back to the caller while instances are still in flight.
 * NOTE(review): failure handling between the memcpy and the aggregate
 * registration is in elided lines.
 */
1114 static struct kretprobe *__kprobes
1115 clone_kretprobe (struct kretprobe *rp)
1117 struct kprobe *old_p;
1118 struct kretprobe *clone = NULL;
1121 clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL);
1124 DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);
1127 memcpy (clone, rp, sizeof (struct kretprobe));
1128 clone->kp.pre_handler = pre_handler_kretprobe;
1129 clone->kp.post_handler = NULL;
1130 clone->kp.fault_handler = NULL;
1131 clone->kp.break_handler = NULL;
1132 old_p = get_kprobe (rp->kp.addr, rp->kp.tgid, NULL);
1135 ret = register_aggr_kprobe (old_p, &clone->kp);
1141 atomic_inc (&kprobe_count);
/*
 * Tear down a user-space return probe. Pins/maps the probed page(s) like
 * register_uretprobe(). If no instances are in flight the trampoline is
 * disarmed immediately; otherwise the retprobe is cloned (deferred
 * deletion) so outstanding returns still land on a live probe. Active
 * instances are then unhashed under kretprobe_lock and the entry uprobe
 * removed; the tail unmaps and releases the pinned pages.
 * NOTE(review): pages/vmas/kaddrs live declarations, several branches and
 * the return are in elided lines — confirm before modifying.
 */
1148 unregister_uretprobe (struct task_struct *task, struct kretprobe *rp, int atomic)
1151 unsigned long flags;
1152 struct kretprobe_instance *ri;
1153 struct kretprobe *rp2 = NULL;
1154 /*struct mm_struct *mm;
1155 struct page *pages[2] = {0, 0};
1156 struct vm_area_struct *vmas[2] = {0, 0};
1157 unsigned long *kaddrs[2] = {0, 0}; */
/* In atomic context we must not sleep, so borrow active_mm instead of
 * taking a reference with get_task_mm(). */
1163 mm = atomic ? task->active_mm : get_task_mm (task);
1166 DBPRINTF ("task %u has no mm!", task->pid);
1173 ret = get_user_pages_atomic (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1176 down_read (&mm->mmap_sem);
1177 ret = get_user_pages (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1181 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
1185 kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1187 kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
/* Second insn may sit on the following page. */
1188 if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
1191 ret = get_user_pages_atomic (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1193 ret = get_user_pages (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1196 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
1200 kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1202 kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1207 pages[1] = pages[0];
1208 kaddrs[1] = kaddrs[0] + 1;
1212 DBPRINTF ("unregister_uretprobe1 addr %p [%lx %lx]", rp->kp.addr, *kaddrs[0], *kaddrs[1]);
1214 spin_lock_irqsave (&kretprobe_lock, flags);
1215 if (hlist_empty (&rp->used_instances))
1217 // if there are no used retprobe instances (i.e. function is not entered) - disarm retprobe
1218 arch_disarm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
1221 set_page_dirty (pages[1]);
1223 set_page_dirty_lock (pages[1]);
/* Instances still in flight: hand the probe point to a clone. */
1228 rp2 = clone_kretprobe (rp);
1230 DBPRINTF ("unregister_uretprobe addr %p: failed to clone retprobe!", rp->kp.addr);
1233 DBPRINTF ("initiating deferred retprobe deletion addr %p", rp->kp.addr);
1234 printk ("initiating deferred retprobe deletion addr %p\n", rp->kp.addr);
1239 while ((ri = get_used_rp_inst (rp)) != NULL)
1243 hlist_del (&ri->uflist);
1245 spin_unlock_irqrestore (&kretprobe_lock, flags);
1248 unregister_uprobe (&rp->kp, task, atomic);
1249 //DBPRINTF("unregister_uretprobe3 addr %p [%lx %lx]",
1250 // rp->kp.addr, *kaddrs[0], *kaddrs[1]);
1258 kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
1262 page_cache_release (pages[0]);
1264 if (pages[1] && (pages[0] != pages[1]))
1269 kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
1273 page_cache_release (pages[1]);
1277 up_read (&mm->mmap_sem);
/*
 * Remove every uprobe belonging to @task->tgid (e.g. on process exit):
 * scan all kprobe_table buckets with the _safe iterator since
 * unregister_uprobe() unlinks entries, then purge leftover insn slots.
 */
1287 unregister_all_uprobes (struct task_struct *task, int atomic)
1289 struct hlist_head *head;
1290 struct hlist_node *node, *tnode;
1294 for(i = 0; i < KPROBE_TABLE_SIZE; i++){
1295 head = &kprobe_table[i];
1296 hlist_for_each_entry_safe (p, node, tnode, head, hlist){
1297 if(p->tgid == task->tgid){
1298 printk("unregister_all_uprobes: delete uprobe at %pf for %s/%d\n", p->addr, task->comm, task->pid);
1299 unregister_uprobe (p, task, atomic);
1303 purge_garbage_uslots(task, atomic);
/* Flag bits for __get_user_pages_uprobe(), mirroring the kernel's
 * internal GUP flags of this era. */
1307 #define GUP_FLAGS_WRITE 0x1
1308 #define GUP_FLAGS_FORCE 0x2
1309 #define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
1310 #define GUP_FLAGS_IGNORE_SIGKILL 0x8
/* True when read faults in @vma may be satisfied by the shared zero page
 * (anonymous, not locked/shared, no fault handler). */
1313 static inline int use_zero_page(struct vm_area_struct *vma)
1316 * We don't want to optimize FOLL_ANON for make_pages_present()
1317 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
1318 * we want to get the page from the page tables to make sure
1319 * that we serialize and update with any other user of that
1322 if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
1325 * And if we have a fault routine, it's not an anonymous region.
1327 return !vma->vm_ops || !vma->vm_ops->fault;
/*
 * Local copy of the kernel's __get_user_pages(), kept here so uprobes can
 * pin pages with the extra GUP_FLAGS_IGNORE_* semantics: walk @len pages
 * from @start in @mm, faulting them in as needed, filling @pages/@vmas.
 * Returns the number of pages pinned or a negative errno ('i ? : -E...'
 * preserves partial progress). Handles gate-area pages, hugetlb VMAs and
 * COW-breaking on write faults like the original.
 * NOTE(review): many locals (i, pgd/pud/pmd/pte, ret), brace lines and the
 * final return are elided from this listing; treat structure as approximate.
 */
1330 int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1331 unsigned long start, int len, int flags,
1332 struct page **pages, struct vm_area_struct **vmas)
1335 unsigned int vm_flags = 0;
1336 int write = !!(flags & GUP_FLAGS_WRITE);
1337 int force = !!(flags & GUP_FLAGS_FORCE);
1338 int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
1339 int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
1344 * Require read or write permissions.
1345 * If 'force' is set, we only require the "MAY" flags.
1347 vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1348 vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1352 struct vm_area_struct *vma;
1353 unsigned int foll_flags;
1355 //vma = find_extend_vma(mm, start);
1356 vma = find_vma(mm, start);
/* Addresses outside any VMA may still be the architecture gate page
 * (e.g. x86 vsyscall): resolve it by a manual page-table walk. */
1357 if (!vma && in_gate_area(tsk, start)) {
1358 unsigned long pg = start & PAGE_MASK;
1359 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
1365 /* user gate pages are read-only */
1366 if (!ignore && write)
1367 return i ? : -EFAULT;
1369 pgd = pgd_offset_k(pg);
1371 pgd = pgd_offset_gate(mm, pg);
1372 BUG_ON(pgd_none(*pgd));
1373 pud = pud_offset(pgd, pg);
1374 BUG_ON(pud_none(*pud));
1375 pmd = pmd_offset(pud, pg);
1377 return i ? : -EFAULT;
1378 pte = pte_offset_map(pmd, pg);
1379 if (pte_none(*pte)) {
1381 return i ? : -EFAULT;
1384 struct page *page = vm_normal_page(gate_vma, start, *pte);
/* Reject I/O / raw-PFN mappings and permission mismatches. */
1399 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1400 (!ignore && !(vm_flags & vma->vm_flags)))
1401 return i ? : -EFAULT;
1403 if (is_vm_hugetlb_page(vma)) {
1404 i = follow_hugetlb_page(mm, vma, pages, vmas,
1405 &start, &len, i, write);
1409 foll_flags = FOLL_TOUCH;
1411 foll_flags |= FOLL_GET;
1413 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
1414 if (!write && use_zero_page(vma))
1415 foll_flags |= FOLL_ANON;
1422 * If we have a pending SIGKILL, don't keep faulting
1423 * pages and potentially allocating memory, unless
1424 * current is handling munlock--e.g., on exit. In
1425 * that case, we are not allocating memory. Rather,
1426 * we're only unlocking already resident/mapped pages.
1428 if (unlikely(!ignore_sigkill &&
1429 fatal_signal_pending(current)))
1430 return i ? i : -ERESTARTSYS;
1433 foll_flags |= FOLL_WRITE;
1438 DBPRINTF ("pages = %p vma = %p\n", pages, vma);
/* Fault the page in until follow_page() can resolve it. */
1439 while (!(page = follow_page(vma, start, foll_flags))) {
1441 ret = handle_mm_fault(mm, vma, start,
1442 foll_flags & FOLL_WRITE);
1443 if (ret & VM_FAULT_ERROR) {
1444 if (ret & VM_FAULT_OOM)
1445 return i ? i : -ENOMEM;
1446 else if (ret & VM_FAULT_SIGBUS)
1447 return i ? i : -EFAULT;
1450 if (ret & VM_FAULT_MAJOR)
1456 * The VM_FAULT_WRITE bit tells us that
1457 * do_wp_page has broken COW when necessary,
1458 * even if maybe_mkwrite decided not to set
1459 * pte_write. We can thus safely do subsequent
1460 * page lookups as if they were reads. But only
1461 * do so when looping for pte_write is futile:
1462 * in some cases userspace may also be wanting
1463 * to write to the gotten user page, which a
1464 * read fault here might prevent (a readonly
1465 * page might get reCOWed by userspace write).
1467 if ((ret & VM_FAULT_WRITE) &&
1468 !(vma->vm_flags & VM_WRITE))
1469 foll_flags &= ~FOLL_WRITE;
1474 return i ? i : PTR_ERR(page);
1478 flush_anon_page(vma, page, start);
1479 flush_dcache_page(page);
1486 } while (len && start < vma->vm_end);
/*
 * get_user_pages_uprobe - pin user pages for uprobe access.
 * Translates the 'write'/'force' ints into GUP_FLAGS_* bits and delegates
 * to __get_user_pages_uprobe().  NOTE(review): interior lines (the 'flags'
 * declaration, the remaining call arguments, closing brace) are elided in
 * this view.
 */
1491 int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1492 unsigned long start, int len, int write, int force,
1493 struct page **pages, struct vm_area_struct **vmas)
1498 flags |= GUP_FLAGS_WRITE;
1500 flags |= GUP_FLAGS_FORCE;
1502 return __get_user_pages_uprobe(tsk, mm,
/*
 * access_process_vm_atomic - read from / write to another task's memory.
 * Mirrors the kernel's access_process_vm(): pins one page at a time with
 * get_user_pages_uprobe(), copies through a kernel mapping, and returns
 * the number of bytes actually transferred (buf - old_buf).
 * NOTE(review): several interior lines (loop header, kmap of 'maddr',
 * error branches, closing brace) are elided in this view.
 */
1508 access_process_vm_atomic (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
1512 struct mm_struct *mm;
1513 struct vm_area_struct *vma;
1514 void *old_buf = buf;
1516 mm = get_task_mm(tsk);
/* Hold mmap_sem for read across the whole transfer. */
1520 down_read(&mm->mmap_sem);
1521 /* ignore errors, just check how much was successfully transferred */
1523 int bytes, ret, offset;
1525 struct page *page = NULL;
1527 ret = get_user_pages_uprobe(tsk, mm, addr, 1,
1528 write, 1, &page, &vma);
1531 * Check if this is a VM_IO | VM_PFNMAP VMA, which
1532 * we can access using slightly different code.
1534 #ifdef CONFIG_HAVE_IOREMAP_PROT
1535 vma = find_vma(mm, addr);
/* Fall back to the VMA's own accessor for I/O mappings. */
1538 if (vma->vm_ops && vma->vm_ops->access)
1539 ret = vma->vm_ops->access(vma, addr, buf,
/* Clamp the copy to the remainder of the current page. */
1547 offset = addr & (PAGE_SIZE-1);
1548 if (bytes > PAGE_SIZE-offset)
1549 bytes = PAGE_SIZE-offset;
1553 copy_to_user_page(vma, page, addr,
1554 maddr + offset, buf, bytes);
1555 set_page_dirty_lock(page);
1557 copy_from_user_page(vma, page, addr,
1558 buf, maddr + offset, bytes);
/* Drop the reference taken by get_user_pages_uprobe(). */
1561 page_cache_release(page);
1567 up_read(&mm->mmap_sem);
1570 return buf - old_buf;
1574 #ifdef CONFIG_DEBUG_FS
/*
 * Pointer to the real (non-exported) kernel kallsyms_lookup, resolved at
 * init time via kallsyms_search() -- see init_kprobes().
 */
1575 const char *(*__real_kallsyms_lookup) (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf);
/* Local wrapper so the rest of the file can call kallsyms_lookup() as usual. */
1577 kallsyms_lookup (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf)
1579 return __real_kallsyms_lookup (addr, symbolsize, offset, modname, namebuf);
/*
 * report_probe - print one probe entry to the debugfs seq_file.
 * Classifies the probe by its pre_handler (kretprobe vs jprobe vs plain
 * kprobe -- the kprobe_type assignments are elided in this view), then
 * prints either "addr type sym+off mod" or "addr type addr" when no
 * symbol was resolved.
 */
1582 static void __kprobes
1583 report_probe (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
1587 if (p->pre_handler == pre_handler_kretprobe)
1592 else if (p->pre_handler == setjmp_pre_handler)
1602 seq_printf (pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type, sym, offset, (modname ? modname : " "));
1604 seq_printf (pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
/*
 * seq_file iterator callbacks for the debugfs "list" file: the position
 * is a kprobe hash-table bucket index, iterated 0..KPROBE_TABLE_SIZE-1.
 */
1607 static void __kprobes *
1608 kprobe_seq_start (struct seq_file *f, loff_t * pos)
1610 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
1613 static void __kprobes *
1614 kprobe_seq_next (struct seq_file *f, void *v, loff_t * pos)
1617 if (*pos >= KPROBE_TABLE_SIZE)
/* Nothing to release per-iteration; stop is intentionally a no-op. */
1622 static void __kprobes
1623 kprobe_seq_stop (struct seq_file *f, void *v)
/*
 * NOTE(review): fields of a struct whose header is elided from this view --
 * presumably 'struct us_proc_ip' (show_kprobe_addr below uses container_of
 * with these member names); confirm against the full source.
 */
1632 struct jprobe jprobe;
1633 struct kretprobe retprobe;
1634 unsigned long offset;
/*
 * show_kprobe_addr - seq_file .show callback for the debugfs "list" file.
 * 'v' is the bucket index from kprobe_seq_start/next; walks that bucket of
 * kprobe_table under RCU and reports each probe (expanding aggregate
 * probes into their member list).
 */
1637 static int __kprobes
1638 show_kprobe_addr (struct seq_file *pi, void *v)
1640 struct hlist_head *head;
1641 struct hlist_node *node;
1642 struct kprobe *p, *kp;
1643 const char *sym = NULL;
1644 unsigned int i = *(loff_t *) v;
1645 unsigned long size, offset = 0;
1646 char *modname, namebuf[128];
1648 head = &kprobe_table[i];
1650 hlist_for_each_entry_rcu (p, node, head, hlist)
1653 struct us_proc_ip *up = NULL;
/* Recover the enclosing us_proc_ip from the embedded kretprobe/jprobe. */
1654 if (p->pre_handler == pre_handler_kretprobe){
1655 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1656 up = container_of(rp, struct us_proc_ip, retprobe);
1658 else {//if (p->pre_handler == setjmp_pre_handler){
1659 struct jprobe *jp = container_of(p, struct jprobe, kp);
1660 up = container_of(jp, struct us_proc_ip, jprobe);
1664 printk("show_kprobe_addr: %s\n", sym);
/* Kernel probes: resolve the symbol via the wrapped kallsyms_lookup(). */
1668 sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
1669 if (p->pre_handler == aggr_pre_handler)
1671 list_for_each_entry_rcu (kp, &p->list, list) report_probe (pi, kp, sym, offset, modname);
1674 report_probe (pi, p, sym, offset, modname);
1676 //seq_printf (pi, "handled exceptions %lu\n", handled_exceptions);
/* seq_file operations backing the debugfs "kprobes/list" file. */
1681 static struct seq_operations kprobes_seq_ops = {
1682 .start = kprobe_seq_start,
1683 .next = kprobe_seq_next,
1684 .stop = kprobe_seq_stop,
1685 .show = show_kprobe_addr
/* .open callback: attach the seq_ops above to the file. */
1688 static int __kprobes
1689 kprobes_open (struct inode *inode, struct file *filp)
1691 return seq_open (filp, &kprobes_seq_ops);
/* file_operations for "kprobes/list" (read path elided in this view). */
1694 static struct file_operations debugfs_kprobes_operations = {
1695 .open = kprobes_open,
1697 .llseek = seq_lseek,
1698 .release = seq_release,
1701 #ifdef KPROBES_PROFILE
/* Probe-timing accumulators defined elsewhere in the module. */
1702 extern unsigned long nCount;
1703 extern struct timeval probe_enter_diff_sum;
/*
 * seq_file iterators for the "prof" file; identical bucket-index scheme
 * to kprobe_seq_start/next/stop above.
 */
1704 static void __kprobes *
1705 kprobe_prof_seq_start (struct seq_file *f, loff_t * pos)
1707 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
1710 static void __kprobes *
1711 kprobe_prof_seq_next (struct seq_file *f, void *v, loff_t * pos)
1714 if (*pos >= KPROBE_TABLE_SIZE)
1719 static void __kprobes
1720 kprobe_prof_seq_stop (struct seq_file *f, void *v)
/*
 * report_probe_prof - print one probe plus its average handler time
 * (hnd_tm_sum / count, guarded against count == 0) to the "prof" file.
 * Probe-type classification mirrors report_probe().
 */
1724 static void __kprobes
1725 report_probe_prof (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
1729 if (p->pre_handler == pre_handler_kretprobe)
1734 else if (p->pre_handler == setjmp_pre_handler)
1745 seq_printf (pi, "%p %s %s+0x%x %s %lu.%06ld\n", p->addr, kprobe_type,
1746 sym, offset, (modname ? modname : " "), p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
1749 seq_printf (pi, "%p %s %p %lu.%06ld\n", p->addr, kprobe_type, p->addr, p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
/*
 * show_kprobe_prof - seq_file .show for the "prof" file.  Per bucket:
 * reports each probe's timing, and accumulates user/kernel handler-time
 * sums in the static utv/ktv/ucount/kcount; after the last bucket prints
 * the averages and resets the accumulators.
 * NOTE(review): static accumulators make concurrent reads of this file
 * race with each other -- single-reader assumption, confirm if relevant.
 */
1752 static int __kprobes
1753 show_kprobe_prof (struct seq_file *pi, void *v)
1755 struct hlist_head *head;
1756 struct hlist_node *node;
1757 struct kprobe *p; //, *kp;
1758 const char *sym = NULL;
1759 unsigned int i = *(loff_t *) v;
1760 unsigned long size, offset = 0;
1761 char *modname, namebuf[128];
1762 static struct timeval utv, ktv;
1763 static unsigned long ucount, kcount;
1765 head = &kprobe_table[i];
1767 hlist_for_each_entry_rcu (p, node, head, hlist)
1769 sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
1770 /*if (p->pre_handler == aggr_pre_handler) {
1771 list_for_each_entry_rcu(kp, &p->list, list)
1772 report_probe_prof(pi, kp, sym, offset, modname);
1774 report_probe_prof (pi, p, sym, offset, modname);
/* Accumulate this probe's handler time into the user-probe totals. */
1779 set_normalized_timeval (&utv, utv.tv_sec + p->hnd_tm_sum.tv_sec, utv.tv_usec + p->hnd_tm_sum.tv_usec);
1784 //seq_printf(pi, "kernel probe handling %lu %lu.%06ld\n",
1785 // p->count, p->hnd_tm_sum.tv_sec, p->hnd_tm_sum.tv_usec);
1786 //seq_printf(pi, "kernel probe handling2 %lu %lu.%06ld\n",
1787 // kcount, ktv.tv_sec, ktv.tv_usec);
/* ...and into the kernel-probe totals (branch selection elided here). */
1788 set_normalized_timeval (&ktv, ktv.tv_sec + p->hnd_tm_sum.tv_sec, ktv.tv_usec + p->hnd_tm_sum.tv_usec);
1790 //seq_printf(pi, "kernel probe handling3 %lu %lu.%06ld\n",
1791 // kcount, ktv.tv_sec, ktv.tv_usec);
/* After the final bucket, emit the averages and reset for the next read. */
1795 if (i == (KPROBE_TABLE_SIZE - 1))
1797 seq_printf (pi, "Average kernel probe handling %lu.%06ld\n", kcount ? ktv.tv_sec / kcount : 0, kcount ? ktv.tv_usec / kcount : 0);
1798 seq_printf (pi, "Average user probe handling %lu.%06ld\n", ucount ? utv.tv_sec / ucount : 0, ucount ? utv.tv_usec / ucount : 0);
1799 seq_printf (pi, "Average probe period %lu.%06ld\n", nCount ? probe_enter_diff_sum.tv_sec / nCount : 0, nCount ? probe_enter_diff_sum.tv_usec / nCount : 0);
1800 utv.tv_sec = utv.tv_usec = ktv.tv_sec = ktv.tv_usec = 0;
1801 ucount = kcount = 0;
/* seq_file operations backing the debugfs "kprobes/prof" file. */
1807 static struct seq_operations kprobes_prof_seq_ops = {
1808 .start = kprobe_prof_seq_start,
1809 .next = kprobe_prof_seq_next,
1810 .stop = kprobe_prof_seq_stop,
1811 .show = show_kprobe_prof
/* .open callback: attach the profiling seq_ops to the file. */
1814 static int __kprobes
1815 kprobes_prof_open (struct inode *inode, struct file *filp)
1817 return seq_open (filp, &kprobes_prof_seq_ops);
/* file_operations for "kprobes/prof" (read path elided in this view). */
1820 static struct file_operations debugfs_kprobes_prof_operations = {
1821 .open = kprobes_prof_open,
1823 .llseek = seq_lseek,
1824 .release = seq_release,
/* debugfs dentries created by debugfs_kprobe_init(), removed on exit. */
1828 int __kprobes debugfs_kprobe_init (void);
1829 static struct dentry *dbg_dir, *dbg_file;
1830 #ifdef KPROBES_PROFILE
1831 static struct dentry *dbg_file_prof;
/*
 * debugfs_kprobe_init - create the debugfs "kprobes" directory plus the
 * "list" (and, with KPROBES_PROFILE, "prof") files.  On a failed file
 * creation, tears down what was already created.  NOTE(review): error
 * checks and return statements are elided in this view.
 */
1835 debugfs_kprobe_init (void)
1837 //struct dentry *dir, *file;
1839 dbg_dir = debugfs_create_dir ("kprobes", NULL);
1843 dbg_file = debugfs_create_file ("list", 0444, dbg_dir, 0, &debugfs_kprobes_operations);
/* "list" creation failed: remove the directory we just made. */
1846 debugfs_remove (dbg_dir);
1851 #ifdef KPROBES_PROFILE
1852 dbg_file_prof = debugfs_create_file ("prof", 0444, dbg_dir, 0, &debugfs_kprobes_prof_operations);
/* "prof" creation failed: unwind both earlier creations. */
1855 debugfs_remove (dbg_file);
1856 debugfs_remove (dbg_dir);
1865 extern unsigned long (*kallsyms_search) (const char *name);
1866 #endif /* CONFIG_DEBUG_FS */
1868 #if defined(CONFIG_X86)
1869 static struct notifier_block kprobe_exceptions_nb = {
1870 .notifier_call = kprobe_exceptions_notify,
/*
 * NOTE(review): body fragment of the module init function (its signature
 * and several lines, including error handling, are elided in this view).
 * Initializes the probe hash tables, arch-specific state, the x86 die
 * notifier, and the debugfs interface.
 */
1880 /* FIXME allocate the probe table, currently defined statically */
1881 /* initialize all list heads */
1882 for (i = 0; i < KPROBE_TABLE_SIZE; i++)
1884 INIT_HLIST_HEAD (&kprobe_table[i]);
1885 INIT_HLIST_HEAD (&kretprobe_inst_table[i]);
1886 INIT_HLIST_HEAD (&uprobe_insn_slot_table[i]);
1888 atomic_set (&kprobe_count, 0);
1890 err = arch_init_kprobes ();
1892 DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);
1893 #if defined(CONFIG_X86)
1895 err = register_die_notifier (&kprobe_exceptions_nb);
1896 DBPRINTF ("init_kprobes: register_die_notifier - %d", err);
1897 #endif // CONFIG_X86
1899 #ifdef CONFIG_DEBUG_FS
/* Resolve the non-exported kallsyms_lookup for the debugfs symbol output. */
1902 __real_kallsyms_lookup = (void *) kallsyms_search ("kallsyms_lookup");
1903 if (!__real_kallsyms_lookup)
1905 DBPRINTF ("kallsyms_lookup is not found! Oops. Where is the kernel?");
1908 err = debugfs_kprobe_init ();
1909 DBPRINTF ("init_kprobes: debugfs_kprobe_init - %d", err);
1911 #endif /* CONFIG_DEBUG_FS */
/*
 * NOTE(review): body fragment of the module exit function (signature
 * elided).  Tears down init_kprobes' work in reverse order: debugfs
 * entries, the x86 die notifier, then arch-specific state.
 */
1919 #ifdef CONFIG_DEBUG_FS
1920 #ifdef KPROBES_PROFILE
1922 debugfs_remove (dbg_file_prof);
1925 debugfs_remove (dbg_file);
1927 debugfs_remove (dbg_dir);
1928 #endif /* CONFIG_DEBUG_FS */
1930 #if defined(CONFIG_X86)
1931 unregister_die_notifier (&kprobe_exceptions_nb);
1932 #endif // CONFIG_X86
1933 arch_exit_kprobes ();
/* Module entry/exit registration. */
1936 module_init (init_kprobes);
1937 module_exit (exit_kprobes);
/* Probe API exported for dependent (GPL) modules. */
1939 EXPORT_SYMBOL_GPL (register_kprobe);
1940 EXPORT_SYMBOL_GPL (unregister_kprobe);
1941 EXPORT_SYMBOL_GPL (register_jprobe);
1942 EXPORT_SYMBOL_GPL (unregister_jprobe);
1943 EXPORT_SYMBOL_GPL (register_ujprobe);
1944 EXPORT_SYMBOL_GPL (unregister_ujprobe);
1945 EXPORT_SYMBOL_GPL (jprobe_return);
1946 EXPORT_SYMBOL_GPL (uprobe_return);
1947 EXPORT_SYMBOL_GPL (register_kretprobe);
1948 EXPORT_SYMBOL_GPL (unregister_kretprobe);
1949 EXPORT_SYMBOL_GPL (register_uretprobe);
1950 EXPORT_SYMBOL_GPL (unregister_uretprobe);
1951 EXPORT_SYMBOL_GPL (unregister_all_uprobes);
1952 EXPORT_SYMBOL_GPL (access_process_vm_atomic);
/* Version-dependent exports (2.6.23 already exports access_process_vm). */
1953 #if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,23)
1954 EXPORT_SYMBOL_GPL (access_process_vm);
1956 #ifdef KERNEL_HAS_ISPAGEPRESENT
1957 EXPORT_SYMBOL_GPL (is_page_present);
1959 EXPORT_SYMBOL_GPL (page_present);