#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#include <linux/config.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
//#include <linux/freezer.h>
#include <linux/seq_file.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>	// kmap_atomic, kunmap_atomic, copy_from_user_page, copy_to_user_page
#include <linux/pagemap.h>	// page_cache_release
#include <linux/vmalloc.h>	// vmalloc, vfree
#if defined(CONFIG_X86)
#include <linux/kdebug.h>	// register_die_notifier, unregister_die_notifier
#include <linux/hugetlb.h>	// follow_hugetlb_page, is_vm_hugetlb_page
//#define arch_remove_kprobe(p) do { } while (0)
static DEFINE_SPINLOCK(die_notifier_lock);
int src_register_die_notifier(struct notifier_block *nb)
spin_lock_irqsave(&die_notifier_lock, flags);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
err = atomic_notifier_chain_register(&panic_notifier_list, nb);
err = notifier_chain_register(&panic_notifier_list, nb);
spin_unlock_irqrestore(&die_notifier_lock, flags);
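/*
 * Usage sketch (not part of the original file; the handler name and message
 * are hypothetical): src_register_die_notifier() above chains the block onto
 * panic_notifier_list, so the callback fires on a kernel panic.
 */
#if 0
static int example_panic_cb (struct notifier_block *nb, unsigned long event, void *arg)
{
	printk (KERN_ERR "kprobes: panic notifier fired (event %lu)\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_nb = {
	.notifier_call = example_panic_cb,
};
/* in module init: src_register_die_notifier (&example_panic_nb); */
#endif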
/*
 * hlist_replace_rcu - replace an old entry with a new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
src_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new)
struct hlist_node *next = old->next;
new->pprev = old->pprev;
new->next->pprev = &new->next;
old->pprev = LIST_POISON2;
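/*
 * Note on the helper above (added commentary, a sketch of intent): unlike a
 * plain hlist_del() followed by hlist_add_head(), this RCU replace keeps the
 * bucket chain reachable for concurrent readers at every instant - a
 * traversal under rcu_read_lock() observes either @old or @new, never a gap.
 * That is what allows add_aggr_kprobe() below to swap a probe for its
 * "manager" kprobe while other CPUs may be walking kprobe_table.
 */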
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
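/*
 * Usage sketch for the macro above (the target symbol is hypothetical;
 * assumes kallsyms_lookup_name() resolves in this kernel):
 */
#if 0
static struct kprobe example_kp;
example_kp.symbol_name = "do_fork";
kprobe_lookup_name (example_kp.symbol_name, example_kp.addr);
/* example_kp.addr now holds the text address of do_fork, or NULL on failure */
#endif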
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static struct hlist_head uprobe_insn_slot_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

//DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK (kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL;
unsigned long handled_exceptions;
/* We have preemption disabled, so it is safe to use the __ versions */
set_kprobe_instance (struct kprobe *kp)
__get_cpu_var (kprobe_instance) = kp;

reset_kprobe_instance (void)
__get_cpu_var (kprobe_instance) = NULL;
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *
get_kprobe (void *addr, int tgid, struct task_struct *ctask)
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p, *retVal = NULL;
int ret = 0, uprobe_found;
struct page *page = NULL, *tpage = NULL;
struct vm_area_struct *vma = NULL;
struct task_struct *task = NULL;
if (ctask && ctask->active_mm)
ret = get_user_pages_atomic (ctask, ctask->active_mm, (unsigned long) addr, 1, 0, 0, &tpage, NULL);
DBPRINTF ("get_user_pages for task %d at %p failed!", current->pid, addr);
paddr = page_address (tpage);
page_cache_release (tpage);
// DBPRINTF("task %d has no mm!", ctask->pid);
//TODO: test - two processes invoke the instrumented function
head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
hlist_for_each_entry_rcu (p, node, head, hlist)
// if we are looking for a kernel probe and this is a kernel probe with the same addr, OR
// if we are looking for a user space probe and this is a user space probe with the same addr and pid
DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->addr, tgid, p->tgid);
if (!tgid || uprobe_found)
DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
else if (tgid != p->tgid)
// if we are looking for a user space probe and this is a user space probe
// with a different addr and pid but the same offset within the page,
// it may be the same probe (with an address from another user space);
// handle it as a usual probe, but without notifying user space
if (paddr && tgid && (((unsigned long) addr & ~PAGE_MASK) == ((unsigned long) p->addr & ~PAGE_MASK))
DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d, possibly for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
// this probe has the same offset in the page;
// look among the probes of the other pids:
// get the page for the user space probe addr
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
task = find_task_by_pid (p->tgid);
#else //if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
get_task_struct (task);
DBPRINTF ("task for pid %d not found! Dead probe?", p->tgid);
if (page_present (task->active_mm, (unsigned long) p->addr))
ret = get_user_pages_atomic (task, task->active_mm, (unsigned long) p->addr, 1, 0, 0, &page, &vma);
DBPRINTF ("get_user_pages for task %d at %p failed!", p->tgid, p->addr);
DBPRINTF ("task %d has no mm!", task->pid);
put_task_struct (task);
if (paddr == page_address (page))
retVal = p;	// we found the probe in another process's address space
DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
panic ("user space probe from another process");
page_cache_release (page);
DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
struct kprobe __kprobes *
get_kprobe_by_insn_slot (void *addr, int tgid, struct task_struct *ctask)
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p, *retVal = NULL;
//TODO: test - two processes invoke the instrumented function
head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
hlist_for_each_entry_rcu (p, node, head, is_hlist)
// if we are looking for a kernel probe and this is a kernel probe with the same addr, OR
// if we are looking for a user space probe and this is a user space probe with the same addr and pid
DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->ainsn.insn, tgid, p->tgid);
if (p->ainsn.insn == addr)
if (!tgid || uprobe_found)
DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
aggr_pre_handler (struct kprobe *p, struct pt_regs *regs /*,
		  struct vm_area_struct **vma,
		  struct page **page, unsigned long **kaddr */ )
list_for_each_entry_rcu (kp, &p->list, list)
set_kprobe_instance (kp);
ret = kp->pre_handler (kp, regs);
reset_kprobe_instance ();

static void __kprobes
aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags)
list_for_each_entry_rcu (kp, &p->list, list)
if (kp->post_handler)
set_kprobe_instance (kp);
kp->post_handler (kp, regs, flags);
reset_kprobe_instance ();

aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr)
struct kprobe *cur = __get_cpu_var (kprobe_instance);
/*
 * if we faulted "during" the execution of a user specified
 * probe handler, invoke just that probe's fault handler
 */
if (cur && cur->fault_handler)
if (cur->fault_handler (cur, regs, trapnr))

aggr_break_handler (struct kprobe *p, struct pt_regs *regs /*,
		    struct vm_area_struct **vma,
		    struct page **page, unsigned long **kaddr */ )
struct kprobe *cur = __get_cpu_var (kprobe_instance);
DBPRINTF ("cur = 0x%p\n", cur);
DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);
if (cur && cur->break_handler)
if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
reset_kprobe_instance ();
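/*
 * Illustration (a sketch, not part of the original file; the handlers h1/h2
 * and some_text_addr are hypothetical): the aggregate handlers above take
 * over when two probes share one address. Registering the second probe turns
 * the first into a "manager" kprobe whose handlers walk p->list and invoke
 * each registered handler in turn (see register_aggr_kprobe() below).
 */
#if 0
static struct kprobe example_kp1 = { .pre_handler = h1 };
static struct kprobe example_kp2 = { .pre_handler = h2 };
example_kp1.addr = example_kp2.addr = some_text_addr;
register_kprobe (&example_kp1, 0);
register_kprobe (&example_kp2, 0);	/* routed through register_aggr_kprobe() */
#endif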
/* Walks the list and increments nmissed count for multiprobe case */
kprobes_inc_nmissed_count (struct kprobe *p)
if (p->pre_handler != aggr_pre_handler)
list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++;
/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *
get_free_rp_inst (struct kretprobe *rp)
struct hlist_node *node;
struct kretprobe_instance *ri;
hlist_for_each_entry (ri, node, &rp->free_instances, uflist)

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *
get_used_rp_inst (struct kretprobe *rp)
struct hlist_node *node;
struct kretprobe_instance *ri;
hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;

/* Called with kretprobe_lock held */
add_rp_inst (struct kretprobe_instance *ri)
/*
 * Remove the rp inst from the free list -
 * add it back when the probed function returns
 */
hlist_del (&ri->uflist);
/* Add rp inst onto table */
INIT_HLIST_NODE (&ri->hlist);
hlist_add_head (&ri->hlist, &kretprobe_inst_table[hash_ptr (ri->task, KPROBE_HASH_BITS)]);
/* Also add this rp inst to the used list. */
INIT_HLIST_NODE (&ri->uflist);
hlist_add_head (&ri->uflist, &ri->rp->used_instances);

/* Called with kretprobe_lock held */
recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
/* remove rp inst off the kretprobe_inst_table */
hlist_del (&ri->hlist);
/* remove rp inst off the used list */
hlist_del (&ri->uflist);
/* put rp inst back onto the free list */
INIT_HLIST_NODE (&ri->uflist);
hlist_add_head (&ri->uflist, &ri->rp->free_instances);
hlist_add_head (&ri->hlist, head);

struct hlist_head __kprobes *
kretprobe_inst_table_head (struct task_struct *tsk)
return &kretprobe_inst_table[hash_ptr (tsk, KPROBE_HASH_BITS)];
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
/*void __kprobes kprobe_flush_task(struct task_struct *tk)
struct kretprobe_instance *ri;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
unsigned long flags = 0;
INIT_HLIST_HEAD(&empty_rp);
spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(tk);
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
recycle_rp_inst(ri, &empty_rp);
spin_unlock_irqrestore(&kretprobe_lock, flags);
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
*/
free_rp_inst (struct kretprobe *rp)
struct kretprobe_instance *ri;
while ((ri = get_free_rp_inst (rp)) != NULL)
hlist_del (&ri->uflist);
/*
 * Keep all fields in the kprobe consistent
 */
copy_kprobe (struct kprobe *old_p, struct kprobe *p)
memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t));
memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn));
p->tgid = old_p->tgid;
p->ss_addr = old_p->ss_addr;
//p->spid = old_p->spid;
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
add_new_kprobe (struct kprobe *old_p, struct kprobe *p)
if (p->break_handler)
if (old_p->break_handler)
list_add_tail_rcu (&p->list, &old_p->list);
old_p->break_handler = aggr_break_handler;
list_add_rcu (&p->list, &old_p->list);
if (p->post_handler && !old_p->post_handler)
old_p->post_handler = aggr_post_handler;
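/*
 * Sketch of the constraint above (not original code; jp1/jp2, my_entry1/2
 * and some_text_addr are hypothetical): both jprobes carry a break_handler,
 * so the second registration at the same address is expected to fail rather
 * than be chained onto the list.
 */
#if 0
static struct jprobe jp1 = { .entry = (kprobe_opcode_t *) my_entry1 };
static struct jprobe jp2 = { .entry = (kprobe_opcode_t *) my_entry2 };
jp1.kp.addr = jp2.kp.addr = some_text_addr;
register_jprobe (&jp1, 0);
register_jprobe (&jp2, 0);	/* rejected: old_p already has a break_handler */
#endif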
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
add_aggr_kprobe (struct kprobe *ap, struct kprobe *p)
flush_insn_slot (ap);
ap->pre_handler = aggr_pre_handler;
ap->fault_handler = aggr_fault_handler;
ap->post_handler = aggr_post_handler;
if (p->break_handler)
ap->break_handler = aggr_break_handler;
INIT_LIST_HEAD (&ap->list);
list_add_rcu (&p->list, &ap->list);
src_hlist_replace_rcu (&p->hlist, &ap->hlist);
/*
 * This is the second or subsequent kprobe at the address - handle
 */
register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p)
DBPRINTF ("start\n");
DBPRINTF ("p = %p old_p = %p \n", p, old_p);
if (old_p->pre_handler == aggr_pre_handler)
DBPRINTF ("aggr_pre_handler \n");
copy_kprobe (old_p, p);
ret = add_new_kprobe (old_p, p);
DBPRINTF ("kzalloc\n");
ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL);
ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL);
memset (ap, 0, sizeof (struct kprobe));
add_aggr_kprobe (ap, old_p);
DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
ret = add_new_kprobe (ap, p);
__register_kprobe (struct kprobe *p, unsigned long called_from, int atomic)
struct kprobe *old_p;
// struct module *probed_mod;
/*
 * If we have a symbol_name argument, look it up
 * and add it to the address. That way the addr
 * field can be either global or relative to a symbol.
 */
kprobe_lookup_name (p->symbol_name, p->addr);
DBPRINTF ("p->addr = 0x%p\n", p->addr);
p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset);
DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
/* if ((!kernel_text_address((unsigned long) p->addr)) ||
   in_kprobes_functions((unsigned long) p->addr))
*/
#ifdef KPROBES_PROFILE
p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
p->mod_refcounted = 0;
// Check whether we are probing a module
if ((probed_mod = module_text_address((unsigned long) p->addr))) {
struct module *calling_mod = module_text_address(called_from);
// We must allow modules to probe themselves and in this case
// avoid incrementing the module refcount, so as to allow
// unloading of self-probing modules.
if (calling_mod && (calling_mod != probed_mod)) {
if (unlikely(!try_module_get(probed_mod)))
p->mod_refcounted = 1;
// mutex_lock(&kprobe_mutex);
old_p = get_kprobe (p->addr, 0, NULL);
ret = register_aggr_kprobe (old_p, p);
atomic_inc (&kprobe_count);
if ((ret = arch_prepare_kprobe (p)) != 0)
DBPRINTF ("before out ret = 0x%x\n", ret);
INIT_HLIST_NODE (&p->hlist);
hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
/* if (atomic_add_return(1, &kprobe_count) == \
   (ARCH_INACTIVE_KPROBE_COUNT + 1))
   register_page_fault_notifier(&kprobe_page_fault_nb);*/
// mutex_unlock(&kprobe_mutex);
if (ret && probed_mod)
module_put(probed_mod);
DBPRINTF ("out ret = 0x%x\n", ret);
__register_uprobe (struct kprobe *p, struct task_struct *task, int atomic, unsigned long called_from)
struct kprobe *old_p;
DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
p->mod_refcounted = 0;
#ifdef KPROBES_PROFILE
p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
// get the first item
old_p = get_kprobe (p->addr, p->tgid, NULL);
ret = register_aggr_kprobe (old_p, p);
atomic_inc (&kprobe_count);
if ((ret = arch_prepare_uprobe (p, task, atomic)) != 0)
DBPRINTF ("before out ret = 0x%x\n", ret);
INIT_HLIST_NODE (&p->hlist);
hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
INIT_HLIST_NODE (&p->is_hlist);
hlist_add_head_rcu (&p->is_hlist, &uprobe_insn_slot_table[hash_ptr (p->ainsn.insn, KPROBE_HASH_BITS)]);
arch_arm_uprobe (p, task);
DBPRINTF ("out ret = 0x%x\n", ret);
unregister_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
unregister_kprobe (p, task, atomic);

register_kprobe (struct kprobe *p, int atomic)
return __register_kprobe (p, (unsigned long) __builtin_return_address (0), atomic);
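/*
 * Minimal usage sketch for the API above (the handler name and target symbol
 * are hypothetical; the extra task/atomic arguments follow this file's own
 * register/unregister signatures):
 */
#if 0
static int example_pre (struct kprobe *p, struct pt_regs *regs)
{
	DBPRINTF ("hit probe at %p", p->addr);
	return 0;	/* 0: let the probed instruction execute */
}

static struct kprobe example_probe = {
	.pre_handler = example_pre,
};

/* in module init:
 *	example_probe.addr = (kprobe_opcode_t *) kallsyms_lookup_name ("do_fork");
 *	register_kprobe (&example_probe, 0);
 * in module exit:
 *	unregister_kprobe (&example_probe, NULL, 0);
 */
#endif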
unregister_kprobe (struct kprobe *p, struct task_struct *task, int atomic)
// struct module *mod;
struct kprobe *old_p, *list_p;
int cleanup_p, pid = 0;
// mutex_lock(&kprobe_mutex);
old_p = get_kprobe (p->addr, pid, NULL);
DBPRINTF ("unregister_kprobe p=%p old_p=%p", p, old_p);
if (unlikely (!old_p))
// mutex_unlock(&kprobe_mutex);
list_for_each_entry_rcu (list_p, &old_p->list, list)
/* kprobe p is a valid probe */
// mutex_unlock(&kprobe_mutex);
DBPRINTF ("unregister_kprobe valid_p");
if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		     (p->list.next == &old_p->list) && (p->list.prev == &old_p->list)))
/* Only probe on the hash list */
DBPRINTF ("unregister_kprobe disarm pid=%d", pid);
arch_disarm_uprobe (p, task);	//vma, page, kaddr);
arch_disarm_kprobe (p);
hlist_del_rcu (&old_p->hlist);
list_del_rcu (&p->list);
DBPRINTF ("unregister_kprobe cleanup_p=%d", cleanup_p);
// mutex_unlock(&kprobe_mutex);
// synchronize_sched();
if (p->mod_refcounted &&
    (mod = module_text_address((unsigned long)p->addr)))
list_del_rcu (&p->list);
arch_remove_kprobe (p, task);
// mutex_lock(&kprobe_mutex);
if (p->break_handler)
old_p->break_handler = NULL;
list_for_each_entry_rcu (list_p, &old_p->list, list)
if (list_p->post_handler)
old_p->post_handler = NULL;
// mutex_unlock(&kprobe_mutex);
/* Call unregister_page_fault_notifier()
 * if no probes are active
 */
// mutex_lock(&kprobe_mutex);
/* if (atomic_add_return(-1, &kprobe_count) == \
   ARCH_INACTIVE_KPROBE_COUNT)
   unregister_page_fault_notifier(&kprobe_page_fault_nb);*/
// mutex_unlock(&kprobe_mutex);
register_ujprobe (struct task_struct *task, struct mm_struct *mm, struct jprobe *jp, int atomic)
/* Todo: Verify probepoint is a function entry point */
jp->kp.pre_handler = setjmp_pre_handler;
jp->kp.break_handler = longjmp_break_handler;
int ret = __register_uprobe (&jp->kp, task, atomic,
			     (unsigned long) __builtin_return_address (0));

unregister_ujprobe (struct task_struct *task, struct jprobe *jp, int atomic)
unregister_uprobe (&jp->kp, task, atomic);
register_jprobe (struct jprobe *jp, int atomic)
/* Todo: Verify probepoint is a function entry point */
jp->kp.pre_handler = setjmp_pre_handler;
jp->kp.break_handler = longjmp_break_handler;
return __register_kprobe (&jp->kp, (unsigned long) __builtin_return_address (0), atomic);

unregister_jprobe (struct jprobe *jp, int atomic)
unregister_kprobe (&jp->kp, NULL, atomic);
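/*
 * Usage sketch for jprobes (the target and handler names are hypothetical):
 * the handler must mirror the probed function's signature and must finish
 * with jprobe_return().
 */
#if 0
static int example_jdo_fork (unsigned long clone_flags, unsigned long stack_start,
			     struct pt_regs *regs, unsigned long stack_size,
			     int __user *parent_tidptr, int __user *child_tidptr)
{
	DBPRINTF ("do_fork(flags=0x%lx)", clone_flags);
	jprobe_return ();	/* control never passes this point */
	return 0;
}

static struct jprobe example_jp = {
	.entry = (kprobe_opcode_t *) example_jdo_fork,
};

/*	example_jp.kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name ("do_fork");
 *	register_jprobe (&example_jp, 0);
 *	unregister_jprobe (&example_jp, 0);
 */
#endif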
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs /*, struct vm_area_struct **vma,
		       struct page **page, unsigned long **kaddr */ )
struct kretprobe *rp = container_of (p, struct kretprobe, kp);
unsigned long flags = 0;
DBPRINTF ("START\n");
/*TODO: consider swapping the RA only after the last pre_handler has fired */
spin_lock_irqsave (&kretprobe_lock, flags);
__arch_prepare_kretprobe (rp, regs);
spin_unlock_irqrestore (&kretprobe_lock, flags);

struct kretprobe *sched_rp;
register_kretprobe (struct kretprobe *rp, int atomic)
struct kretprobe_instance *inst;
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
rp->kp.fault_handler = NULL;
rp->kp.break_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->kp.addr == sched_addr)
rp->maxactive = 1000;	//max (100, 2 * NR_CPUS);
else if (rp->maxactive <= 0)
#if 1	//def CONFIG_PREEMPT
rp->maxactive = max (10, 2 * NR_CPUS);
rp->maxactive = NR_CPUS;
INIT_HLIST_HEAD (&rp->used_instances);
INIT_HLIST_HEAD (&rp->free_instances);
for (i = 0; i < rp->maxactive; i++)
inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
INIT_HLIST_NODE (&inst->uflist);
hlist_add_head (&inst->uflist, &rp->free_instances);
DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
/* Establish function entry probe point */
if ((ret = __register_kprobe (&rp->kp, (unsigned long) __builtin_return_address (0), atomic)) != 0)
DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
if (rp->kp.addr == sched_addr)
unregister_kretprobe (struct kretprobe *rp, int atomic)
struct kretprobe_instance *ri;
//printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
//	 *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
unregister_kprobe (&rp->kp, NULL, atomic);
if (rp->kp.addr == sched_addr)
//printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
//	 *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
spin_lock_irqsave (&kretprobe_lock, flags);
while ((ri = get_used_rp_inst (rp)) != NULL)
hlist_del (&ri->uflist);
spin_unlock_irqrestore (&kretprobe_lock, flags);
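/*
 * Usage sketch for kretprobes (the handler name and target symbol are
 * hypothetical). The handler runs when the probed function returns;
 * maxactive bounds the number of concurrently tracked calls:
 */
#if 0
static int example_ret_handler (struct kretprobe_instance *ri, struct pt_regs *regs)
{
	DBPRINTF ("return from %p in task %d", ri->rp->kp.addr, ri->task->pid);
	return 0;
}

static struct kretprobe example_rp = {
	.handler = example_ret_handler,
	.maxactive = 20,
};

/*	example_rp.kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name ("do_fork");
 *	register_kretprobe (&example_rp, 0);
 *	unregister_kretprobe (&example_rp, 0);
 */
#endif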
register_uretprobe (struct task_struct *task, struct mm_struct *mm, struct kretprobe *rp, int atomic)
struct kretprobe_instance *inst;
/*struct page *pages[2] = {0, 0};
  struct vm_area_struct *vmas[2] = {0, 0};
  unsigned long *kaddrs[2] = {0, 0}; */
DBPRINTF ("START\n");
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
rp->kp.fault_handler = NULL;
rp->kp.break_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
if (rp->maxactive <= 0)
#if 1	//def CONFIG_PREEMPT
rp->maxactive = max (10, 2 * NR_CPUS);
rp->maxactive = NR_CPUS;
INIT_HLIST_HEAD (&rp->used_instances);
INIT_HLIST_HEAD (&rp->free_instances);
for (i = 0; i < rp->maxactive; i++)
inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
INIT_HLIST_NODE (&inst->uflist);
hlist_add_head (&inst->uflist, &rp->free_instances);
ret = get_user_pages_atomic (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
ret = get_user_pages (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
// if the 2nd instruction is on the 2nd page
if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
ret = get_user_pages_atomic (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
ret = get_user_pages (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
// the 2nd instruction is on the 1st page too
pages[1] = pages[0];
kaddrs[1] = kaddrs[0] + 1;
/* Establish function exit probe point */
if ((ret = arch_prepare_uretprobe (rp, task /*vmas, pages, kaddrs */ )) != 0)
/* Establish function entry probe point */
if ((ret = __register_uprobe (&rp->kp, task, atomic,
			      (unsigned long) __builtin_return_address (0))) != 0)
arch_arm_uretprobe (rp, task);	//vmas[1], pages[1], kaddrs[1]);
set_page_dirty (pages[1]);
set_page_dirty_lock (pages[1]);
kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
page_cache_release (pages[0]);
if ((pages[0] != pages[1]))
kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
page_cache_release (pages[1]);
/*else if( (pages[0] != pages[2]) ){
if (atomic) kunmap_atomic(kaddrs[2], KM_USER1);
else kunmap(pages[2]);
page_cache_release(pages[2]);
*/
static struct kretprobe *__kprobes
clone_kretprobe (struct kretprobe *rp)
struct kprobe *old_p;
struct kretprobe *clone = NULL;
clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL);
DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);
memcpy (clone, rp, sizeof (struct kretprobe));
clone->kp.pre_handler = pre_handler_kretprobe;
clone->kp.post_handler = NULL;
clone->kp.fault_handler = NULL;
clone->kp.break_handler = NULL;
old_p = get_kprobe (rp->kp.addr, rp->kp.tgid, NULL);
ret = register_aggr_kprobe (old_p, &clone->kp);
atomic_inc (&kprobe_count);
unregister_uretprobe (struct task_struct *task, struct kretprobe *rp, int atomic)
unsigned long flags;
struct kretprobe_instance *ri;
struct kretprobe *rp2 = NULL;
/*struct mm_struct *mm;
  struct page *pages[2] = {0, 0};
  struct vm_area_struct *vmas[2] = {0, 0};
  unsigned long *kaddrs[2] = {0, 0}; */
mm = atomic ? task->active_mm : get_task_mm (task);
DBPRINTF ("task %u has no mm!", task->pid);
ret = get_user_pages_atomic (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
down_read (&mm->mmap_sem);
ret = get_user_pages (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
ret = get_user_pages_atomic (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
ret = get_user_pages (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
pages[1] = pages[0];
kaddrs[1] = kaddrs[0] + 1;
DBPRINTF ("unregister_uretprobe1 addr %p [%lx %lx]", rp->kp.addr, *kaddrs[0], *kaddrs[1]);
spin_lock_irqsave (&kretprobe_lock, flags);
if (hlist_empty (&rp->used_instances))
// if there are no used retprobe instances (i.e. the function is not currently entered), disarm the retprobe
arch_disarm_uretprobe (rp, task);	//vmas[1], pages[1], kaddrs[1]);
set_page_dirty (pages[1]);
set_page_dirty_lock (pages[1]);
rp2 = clone_kretprobe (rp);
DBPRINTF ("unregister_uretprobe addr %p: failed to clone retprobe!", rp->kp.addr);
DBPRINTF ("initiating deferred retprobe deletion addr %p", rp->kp.addr);
printk ("initiating deferred retprobe deletion addr %p\n", rp->kp.addr);
while ((ri = get_used_rp_inst (rp)) != NULL)
hlist_del (&ri->uflist);
spin_unlock_irqrestore (&kretprobe_lock, flags);
unregister_uprobe (&rp->kp, task, atomic);
//DBPRINTF("unregister_uretprobe3 addr %p [%lx %lx]",
//	   rp->kp.addr, *kaddrs[0], *kaddrs[1]);
kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
page_cache_release (pages[0]);
if (pages[1] && (pages[0] != pages[1]))
kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
page_cache_release (pages[1]);
up_read (&mm->mmap_sem);
unregister_all_uprobes (struct task_struct *task, int atomic)
struct hlist_head *head;
struct hlist_node *node, *tnode;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_safe (p, node, tnode, head, hlist) {
if (p->tgid == task->tgid) {
printk ("unregister_all_uprobes: delete uprobe at %pf for %s/%d\n", p->addr, task->comm, task->pid);
unregister_uprobe (p, task, atomic);
purge_garbage_uslots (task, atomic);
access_process_vm (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
struct mm_struct *mm;
struct vm_area_struct *vma;
void *old_buf = buf;
mm = get_task_mm (tsk);
down_read (&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
int bytes, ret, offset;
ret = get_user_pages (tsk, mm, addr, 1, write, 1, &page, &vma);
offset = addr & (PAGE_SIZE - 1);
if (bytes > PAGE_SIZE - offset)
bytes = PAGE_SIZE - offset;
maddr = kmap (page);	//, KM_USER0);
copy_to_user_page (vma, page, addr, maddr + offset, buf, bytes);
set_page_dirty_lock (page);
copy_from_user_page (vma, page, addr, buf, maddr + offset, bytes);
kunmap (page);	//, KM_USER0);
page_cache_release (page);
up_read (&mm->mmap_sem);
return buf - old_buf;
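/*
 * Usage sketch for the helper above (some_task, some_user_addr and the
 * buffer size are hypothetical): reading 16 bytes out of another task's
 * address space. The return value is the number of bytes transferred.
 */
#if 0
char buf[16];
int copied = access_process_vm (some_task, some_user_addr, buf, sizeof (buf), 0 /* read */);
if (copied != sizeof (buf))
	DBPRINTF ("only %d of %zu bytes copied", copied, sizeof (buf));
#endif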
#ifdef CONFIG_DEBUG_FS
const char *(*__real_kallsyms_lookup) (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf);
kallsyms_lookup (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf)
return __real_kallsyms_lookup (addr, symbolsize, offset, modname, namebuf);

static void __kprobes
report_probe (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
if (p->pre_handler == pre_handler_kretprobe)
else if (p->pre_handler == setjmp_pre_handler)
seq_printf (pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type, sym, offset, (modname ? modname : " "));
seq_printf (pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
static void __kprobes *
kprobe_seq_start (struct seq_file *f, loff_t * pos)
return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;

static void __kprobes *
kprobe_seq_next (struct seq_file *f, void *v, loff_t * pos)
if (*pos >= KPROBE_TABLE_SIZE)

static void __kprobes
kprobe_seq_stop (struct seq_file *f, void *v)
struct jprobe jprobe;
struct kretprobe retprobe;
unsigned long offset;
static int __kprobes
show_kprobe_addr (struct seq_file *pi, void *v)
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p, *kp;
const char *sym = NULL;
unsigned int i = *(loff_t *) v;
unsigned long size, offset = 0;
char *modname, namebuf[128];
head = &kprobe_table[i];
hlist_for_each_entry_rcu (p, node, head, hlist)
struct us_proc_ip *up = NULL;
if (p->pre_handler == pre_handler_kretprobe) {
struct kretprobe *rp = container_of (p, struct kretprobe, kp);
up = container_of (rp, struct us_proc_ip, retprobe);
}
else {	//if (p->pre_handler == setjmp_pre_handler){
struct jprobe *jp = container_of (p, struct jprobe, kp);
up = container_of (jp, struct us_proc_ip, jprobe);
printk ("show_kprobe_addr: %s\n", sym);
sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
if (p->pre_handler == aggr_pre_handler)
list_for_each_entry_rcu (kp, &p->list, list) report_probe (pi, kp, sym, offset, modname);
report_probe (pi, p, sym, offset, modname);
//seq_printf (pi, "handled exceptions %lu\n", handled_exceptions);
static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next = kprobe_seq_next,
	.stop = kprobe_seq_stop,
	.show = show_kprobe_addr
};

static int __kprobes
kprobes_open (struct inode *inode, struct file *filp)
return seq_open (filp, &kprobes_seq_ops);

static struct file_operations debugfs_kprobes_operations = {
	.open = kprobes_open,
	.llseek = seq_lseek,
	.release = seq_release,
};
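/*
 * Consumer sketch (userspace, not kernel code; assumes debugfs is mounted at
 * /sys/kernel/debug): the seq_file plumbing above exposes the probe list as
 * plain text, one report_probe() line per probe (address, type, symbol+offset).
 */
#if 0
#include <stdio.h>
int main (void)
{
	char line[256];
	FILE *f = fopen ("/sys/kernel/debug/kprobes/list", "r");
	if (!f)
		return 1;
	while (fgets (line, sizeof (line), f))
		fputs (line, stdout);
	fclose (f);
	return 0;
}
#endif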
#ifdef KPROBES_PROFILE
extern unsigned long nCount;
extern struct timeval probe_enter_diff_sum;
static void __kprobes *
kprobe_prof_seq_start (struct seq_file *f, loff_t * pos)
return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;

static void __kprobes *
kprobe_prof_seq_next (struct seq_file *f, void *v, loff_t * pos)
if (*pos >= KPROBE_TABLE_SIZE)

static void __kprobes
kprobe_prof_seq_stop (struct seq_file *f, void *v)

static void __kprobes
report_probe_prof (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
if (p->pre_handler == pre_handler_kretprobe)
else if (p->pre_handler == setjmp_pre_handler)
seq_printf (pi, "%p %s %s+0x%x %s %lu.%06ld\n", p->addr, kprobe_type,
	    sym, offset, (modname ? modname : " "), p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
seq_printf (pi, "%p %s %p %lu.%06ld\n", p->addr, kprobe_type, p->addr, p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
static int __kprobes
show_kprobe_prof (struct seq_file *pi, void *v)
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p;	//, *kp;
const char *sym = NULL;
unsigned int i = *(loff_t *) v;
unsigned long size, offset = 0;
char *modname, namebuf[128];
static struct timeval utv, ktv;
static unsigned long ucount, kcount;
head = &kprobe_table[i];
hlist_for_each_entry_rcu (p, node, head, hlist)
sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
/*if (p->pre_handler == aggr_pre_handler) {
  list_for_each_entry_rcu(kp, &p->list, list)
  report_probe_prof(pi, kp, sym, offset, modname);
*/
report_probe_prof (pi, p, sym, offset, modname);
set_normalized_timeval (&utv, utv.tv_sec + p->hnd_tm_sum.tv_sec, utv.tv_usec + p->hnd_tm_sum.tv_usec);
//seq_printf(pi, "kernel probe handling %lu %lu.%06ld\n",
//	 p->count, p->hnd_tm_sum.tv_sec, p->hnd_tm_sum.tv_usec);
//seq_printf(pi, "kernel probe handling2 %lu %lu.%06ld\n",
//	 kcount, ktv.tv_sec, ktv.tv_usec);
set_normalized_timeval (&ktv, ktv.tv_sec + p->hnd_tm_sum.tv_sec, ktv.tv_usec + p->hnd_tm_sum.tv_usec);
//seq_printf(pi, "kernel probe handling3 %lu %lu.%06ld\n",
//	 kcount, ktv.tv_sec, ktv.tv_usec);
if (i == (KPROBE_TABLE_SIZE - 1))
seq_printf (pi, "Average kernel probe handling %lu.%06ld\n", kcount ? ktv.tv_sec / kcount : 0, kcount ? ktv.tv_usec / kcount : 0);
seq_printf (pi, "Average user probe handling %lu.%06ld\n", ucount ? utv.tv_sec / ucount : 0, ucount ? utv.tv_usec / ucount : 0);
seq_printf (pi, "Average probe period %lu.%06ld\n", nCount ? probe_enter_diff_sum.tv_sec / nCount : 0, nCount ? probe_enter_diff_sum.tv_usec / nCount : 0);
utv.tv_sec = utv.tv_usec = ktv.tv_sec = ktv.tv_usec = 0;
ucount = kcount = 0;
static struct seq_operations kprobes_prof_seq_ops = {
	.start = kprobe_prof_seq_start,
	.next = kprobe_prof_seq_next,
	.stop = kprobe_prof_seq_stop,
	.show = show_kprobe_prof
};

static int __kprobes
kprobes_prof_open (struct inode *inode, struct file *filp)
return seq_open (filp, &kprobes_prof_seq_ops);

static struct file_operations debugfs_kprobes_prof_operations = {
	.open = kprobes_prof_open,
	.llseek = seq_lseek,
	.release = seq_release,
};
int __kprobes debugfs_kprobe_init (void);
static struct dentry *dbg_dir, *dbg_file;
#ifdef KPROBES_PROFILE
static struct dentry *dbg_file_prof;

debugfs_kprobe_init (void)
//struct dentry *dir, *file;
dbg_dir = debugfs_create_dir ("kprobes", NULL);
dbg_file = debugfs_create_file ("list", 0444, dbg_dir, NULL, &debugfs_kprobes_operations);
debugfs_remove (dbg_dir);
#ifdef KPROBES_PROFILE
dbg_file_prof = debugfs_create_file ("prof", 0444, dbg_dir, NULL, &debugfs_kprobes_prof_operations);
debugfs_remove (dbg_file);
debugfs_remove (dbg_dir);

//late_initcall(debugfs_kprobe_init);
extern unsigned long (*kallsyms_search) (const char *name);
#endif /* CONFIG_DEBUG_FS */
#if defined(CONFIG_X86)
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
};

/* FIXME: allocate the probe table; currently defined statically */
/* initialize all list heads */
for (i = 0; i < KPROBE_TABLE_SIZE; i++)
INIT_HLIST_HEAD (&kprobe_table[i]);
INIT_HLIST_HEAD (&kretprobe_inst_table[i]);
INIT_HLIST_HEAD (&uprobe_insn_slot_table[i]);
atomic_set (&kprobe_count, 0);
err = arch_init_kprobes ();
DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);
#if defined(CONFIG_X86)
err = register_die_notifier (&kprobe_exceptions_nb);
DBPRINTF ("init_kprobes: register_die_notifier - %d", err);
#endif // CONFIG_X86
#ifdef CONFIG_DEBUG_FS
__real_kallsyms_lookup = (void *) kallsyms_search ("kallsyms_lookup");
if (!__real_kallsyms_lookup)
DBPRINTF ("kallsyms_lookup is not found! Oops. Where is the kernel?");
err = debugfs_kprobe_init ();
DBPRINTF ("init_kprobes: debugfs_kprobe_init - %d", err);
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_DEBUG_FS
#ifdef KPROBES_PROFILE
debugfs_remove (dbg_file_prof);
debugfs_remove (dbg_file);
debugfs_remove (dbg_dir);
#endif /* CONFIG_DEBUG_FS */
#if defined(CONFIG_X86)
unregister_die_notifier (&kprobe_exceptions_nb);
#endif // CONFIG_X86
arch_exit_kprobes ();

module_init (init_kprobes);
module_exit (exit_kprobes);
EXPORT_SYMBOL_GPL (register_kprobe);
EXPORT_SYMBOL_GPL (unregister_kprobe);
EXPORT_SYMBOL_GPL (register_jprobe);
EXPORT_SYMBOL_GPL (unregister_jprobe);
EXPORT_SYMBOL_GPL (register_ujprobe);
EXPORT_SYMBOL_GPL (unregister_ujprobe);
EXPORT_SYMBOL_GPL (jprobe_return);
EXPORT_SYMBOL_GPL (uprobe_return);
EXPORT_SYMBOL_GPL (register_kretprobe);
EXPORT_SYMBOL_GPL (unregister_kretprobe);
EXPORT_SYMBOL_GPL (register_uretprobe);
EXPORT_SYMBOL_GPL (unregister_uretprobe);
EXPORT_SYMBOL_GPL (unregister_all_uprobes);
//EXPORT_SYMBOL_GPL (access_process_vm_atomic);
#if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,23)
EXPORT_SYMBOL_GPL (access_process_vm);
#ifdef KERNEL_HAS_ISPAGEPRESENT
EXPORT_SYMBOL_GPL (is_page_present);
EXPORT_SYMBOL_GPL (page_present);
//EXPORT_SYMBOL_GPL(get_user_pages_atomic);