4 * Kernel Probes (KProbes)
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 * Copyright (C) IBM Corporation, 2002, 2004
25 * Dynamic Binary Instrumentation Module based on KProbes
26 * modules/kprobe/dbi_kprobes.h
28 * This program is free software; you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License as published by
30 * the Free Software Foundation; either version 2 of the License, or
31 * (at your option) any later version.
33 * This program is distributed in the hope that it will be useful,
34 * but WITHOUT ANY WARRANTY; without even the implied warranty of
35 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36 * GNU General Public License for more details.
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
42 * Copyright (C) Samsung Electronics, 2006-2010
44 * 2006-2007 Ekaterina Gorelkina <e.gorelkina@samsung.com>: initial implementation for ARM and MIPS
45 * 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
46 * Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
47 * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
52 #include <linux/version.h>
53 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
54 #include <linux/config.h>
57 #include <linux/hash.h>
58 #include <linux/module.h>
61 #include "dbi_kprobes.h"
62 #include "arch/dbi_kprobes.h"
63 #include "arch/asm/dbi_kprobes.h"
65 #include "dbi_kdebug.h"
66 #include "dbi_kprobes_deps.h"
67 #include "dbi_insn_slots.h"
68 #include "dbi_uprobes.h"
/* Probe-point addresses for the scheduler and fork paths, resolved elsewhere. */
70 extern unsigned int *sched_addr;
71 extern unsigned int *fork_addr;
/* Pages holding out-of-line instruction slots (see dbi_insn_slots). */
73 extern struct hlist_head kprobe_insn_pages;
/* Symbol-lookup hook installed by the loader: maps a name to its address. */
75 extern unsigned long (*kallsyms_search) (const char *name);
/* Per-CPU state: the kprobe currently hit on this CPU and its control block. */
77 DEFINE_PER_CPU (struct kprobe *, current_kprobe) = NULL;
78 DEFINE_PER_CPU (struct kprobe_ctlblk, kprobe_ctlblk);
80 DEFINE_SPINLOCK (kretprobe_lock); /* Protects kretprobe_inst_table */
/* Per-CPU: the kprobe whose handler is executing right now (for fault/break routing). */
81 DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL;
/* Hash tables: registered probes, and live kretprobe instances keyed by task. */
83 struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
84 struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
86 struct mutex kprobe_mutex;
87 atomic_t kprobe_count;
90 void kretprobe_assert (struct kretprobe_instance *ri, unsigned long orig_ret_address, unsigned long trampoline_address)
92 if (!orig_ret_address || (orig_ret_address == trampoline_address))
93 panic ("kretprobe BUG!: Processing kretprobe %p @ %p\n", ri->rp, ri->rp->kp.addr);
97 /* We have preemption disabled.. so it is safe to use __ versions */
99 void set_kprobe_instance (struct kprobe *kp)
101 __get_cpu_var (kprobe_instance) = kp;
105 void reset_kprobe_instance (void)
107 __get_cpu_var (kprobe_instance) = NULL;
110 /* kprobe_running() will just return the current_kprobe on this CPU */
111 struct kprobe *kprobe_running (void)
113 return (__get_cpu_var (current_kprobe));
116 void reset_current_kprobe (void)
118 __get_cpu_var (current_kprobe) = NULL;
121 struct kprobe_ctlblk *get_kprobe_ctlblk (void)
123 return (&__get_cpu_var (kprobe_ctlblk));
/*
 * get_kprobe - look up a registered probe by address (and, for user-space
 * probes, by tgid). Kernel probes match on address alone (tgid == 0);
 * user-space probes additionally match the owning tgid, falling back to a
 * same-page-offset comparison across processes.
 *
 * NOTE(review): this dump is missing lines (embedded original line numbers
 * are non-contiguous) — braces, returns and several conditionals of this
 * function are not visible here. Verify against the full source.
 *
127 * This routine is called either:
128 * - under the kprobe_mutex - during kprobe_[un]register()
130 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
132 struct kprobe *get_kprobe (void *addr, int tgid, struct task_struct *ctask)
134 struct hlist_head *head;
135 struct hlist_node *node;
136 struct kprobe *p, *retVal = NULL;
137 int ret = 0, uprobe_found;
138 struct page *page = 0, *tpage = 0;
139 struct vm_area_struct *vma = 0;
140 struct task_struct *task = 0;
/* For a user-space lookup, pin the target page so its kernel-side address
 * (paddr) can be compared against other processes' probe pages below. */
144 if (ctask && ctask->active_mm)
146 ret = get_user_pages_uprobe (ctask, ctask->active_mm, (unsigned long) addr, 1, 0, 0, &tpage, NULL);
148 DBPRINTF ("get_user_pages for task %d at %p failed!", current->pid, addr);
151 paddr = page_address (tpage);
/* Drop the page reference taken by get_user_pages_uprobe above. */
152 page_cache_release (tpage);
156 //TODO: test - two processes invokes instrumented function
/* Walk the hash bucket for @addr under RCU. */
157 head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
158 hlist_for_each_entry_rcu (p, node, head, hlist)
160 //if looking for kernel probe and this is kernel probe with the same addr OR
161 //if looking for the user space probe and this is user space probe probe with the same addr and pid
162 DBPRINTF ("get_kprobe: check probe at %p/%p, task %d/%d", addr, p->addr, tgid, p->tgid);
163 /* Direct hit: kernel probe (tgid == 0) or matching user-space probe. */
168 if (!tgid || uprobe_found)
172 DBPRINTF ("get_kprobe: found user space probe at %p for task %d", p->addr, p->tgid);
174 DBPRINTF ("get_kprobe: found kernel probe at %p", p->addr);
/* Probe registered for a different tgid — may still be the same physical
 * page mapped into another process; check via the page-offset heuristic. */
178 else if (tgid != p->tgid)
180 // if looking for the user space probe and this is user space probe
181 // with another addr and pid but with the same offset within the page
182 // it could be that it is the same probe (with address from other user space)
183 // we should handle it as usual probe but without notification to user
184 if (paddr && tgid && (((unsigned long) addr & ~PAGE_MASK) == ((unsigned long) p->addr & ~PAGE_MASK))
187 DBPRINTF ("get_kprobe: found user space probe at %p in task %d. possibly for addr %p in task %d", p->addr, p->tgid, addr, tgid);
188 // this probe has the same offset in the page
189 // look in the probes for the other pids
190 // get page for user space probe addr
/* Pre-2.6.26 kernels lack pid namespaces; use the flat pid lookup there. */
192 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
193 task = find_task_by_pid (p->tgid);
194 #else // if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
195 task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
198 get_task_struct (task);
202 DBPRINTF ("task for pid %d not found! Dead probe?", p->tgid);
/* Only touch the other process's page if it is actually resident. */
207 if (page_present (task->active_mm, (unsigned long) p->addr))
209 ret = get_user_pages_uprobe (task, task->active_mm, (unsigned long) p->addr, 1, 0, 0, &page, &vma);
211 DBPRINTF ("get_user_pages for task %d at %p failed!", p->tgid, p->addr);
218 DBPRINTF ("task %d has no mm!", task->pid);
221 //put_task_struct (task);
/* Same kernel-side page address => same physical probe site. */
225 if (paddr == page_address (page))
227 retVal = p; // we found the probe in other process address space
228 DBPRINTF ("get_kprobe: found user space probe at %p in task %d for addr %p in task %d", p->addr, p->tgid, addr, tgid);
/* NOTE(review): this panics on a cross-process match — presumably a
 * deliberate "unsupported case" trap; confirm against full source. */
229 panic ("user space probe from another process");
231 page_cache_release (page);
238 DBPRINTF ("get_kprobe: probe %p", retVal);
/*
244 * Aggregate handlers for multiple kprobes support - these handlers
245 * take care of invoking the individual kprobe handlers on p->list
 */
/*
 * aggr_pre_handler - fan a breakpoint hit out to every attached pre_handler.
 * NOTE(review): lines are missing from this dump (the ret check and the
 * function's return are not visible); verify against the full source.
 */
248 int aggr_pre_handler (struct kprobe *p, struct pt_regs *regs)
253 list_for_each_entry_rcu (kp, &p->list, list)
/* Publish @kp as the running probe so fault/break handlers can find it. */
257 set_kprobe_instance (kp);
258 ret = kp->pre_handler (kp, regs);
262 reset_kprobe_instance ();
/*
 * aggr_post_handler - run every attached post_handler after single-step.
 * Handlers without a post_handler are skipped. (Dump is missing brace
 * lines; verify against full source.)
 */
268 void aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags)
272 list_for_each_entry_rcu (kp, &p->list, list)
274 if (kp->post_handler)
276 set_kprobe_instance (kp);
277 kp->post_handler (kp, regs, flags);
278 reset_kprobe_instance ();
/*
 * aggr_fault_handler - route a fault to the probe whose handler was running.
 * NOTE(review): the return statements are missing from this dump.
 */
285 int aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr)
/* kprobe_instance identifies the handler that was executing when we faulted. */
287 struct kprobe *cur = __get_cpu_var (kprobe_instance);
/*
290 * if we faulted "during" the execution of a user specified
291 * probe handler, invoke just that probe's fault handler
 */
293 if (cur && cur->fault_handler)
295 if (cur->fault_handler (cur, regs, trapnr))
/*
 * aggr_break_handler - route a breakpoint (from a jprobe longjmp) to the
 * probe whose handler is currently running on this CPU.
 * NOTE(review): return statements are missing from this dump.
 */
302 int aggr_break_handler (struct kprobe *p, struct pt_regs *regs)
304 struct kprobe *cur = __get_cpu_var (kprobe_instance);
306 DBPRINTF ("cur = 0x%p\n", cur);
308 DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);
310 if (cur && cur->break_handler)
312 if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
315 reset_kprobe_instance ();
319 /* Walks the list and increments nmissed count for multiprobe case */
/* For an aggregate probe, every child probe missed; for a plain probe the
 * single-probe path (missing from this dump) bumps p->nmissed directly. */
320 void kprobes_inc_nmissed_count (struct kprobe *p)
323 if (p->pre_handler != aggr_pre_handler)
329 list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++;
334 /* Called with kretprobe_lock held */
/* Return the first instance on rp->free_instances, or NULL when exhausted
 * (the return statements are missing from this dump). */
335 struct kretprobe_instance *get_free_rp_inst (struct kretprobe *rp)
337 struct hlist_node *node;
338 struct kretprobe_instance *ri;
339 hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
344 /* Called with kretprobe_lock held */
/* Return the first in-flight instance on rp->used_instances; the NULL
 * fallthrough return is missing from this dump. */
345 struct kretprobe_instance *get_used_rp_inst (struct kretprobe *rp)
347 struct hlist_node *node;
348 struct kretprobe_instance *ri;
349 hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
353 /* Called with kretprobe_lock held */
/* Move a kretprobe instance from the free list to both the global
 * per-task hash table and its kretprobe's used list. */
354 void add_rp_inst (struct kretprobe_instance *ri)
/*
357 * Remove rp inst off the free list -
358 * Add it back when probed function returns
 */
360 hlist_del (&ri->uflist);
362 /* Add rp inst onto table */
363 INIT_HLIST_NODE (&ri->hlist);
/* Hashed by the probed task so hits can be matched back on return. */
364 hlist_add_head (&ri->hlist, &kretprobe_inst_table[hash_ptr (ri->task, KPROBE_HASH_BITS)]);
366 /* Also add this rp inst to the used list. */
367 INIT_HLIST_NODE (&ri->uflist);
368 hlist_add_head (&ri->uflist, &ri->rp->used_instances);
371 /* Called with kretprobe_lock held */
/*
 * recycle_rp_inst - return a fired instance to its kretprobe's free list,
 * or (when the owning kretprobe is gone — that branch is missing from this
 * dump) park it on @head for deferred freeing.
 */
372 void recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
374 /* remove rp inst off the rprobe_inst_table */
375 hlist_del (&ri->hlist);
378 /* remove rp inst off the used list */
379 hlist_del (&ri->uflist);
380 /* put rp inst back onto the free list */
381 INIT_HLIST_NODE (&ri->uflist);
382 hlist_add_head (&ri->uflist, &ri->rp->free_instances);
/* Orphaned instance path: queue on the caller-supplied list. */
386 hlist_add_head (&ri->hlist, head);
389 struct hlist_head * kretprobe_inst_table_head (struct task_struct *tsk)
391 return &kretprobe_inst_table[hash_ptr (tsk, KPROBE_HASH_BITS)];
/* Drain and release all pre-allocated free instances of @rp.
 * NOTE(review): the kfree of each instance is missing from this dump. */
394 void free_rp_inst (struct kretprobe *rp)
396 struct kretprobe_instance *ri;
397 while ((ri = get_free_rp_inst (rp)) != NULL)
399 hlist_del (&ri->uflist);
405 * Keep all fields in the kprobe consistent
408 void copy_kprobe (struct kprobe *old_p, struct kprobe *p)
410 memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t));
411 memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn));
412 p->tgid = old_p->tgid;
413 p->ss_addr = old_p->ss_addr;
414 //p->spid = old_p->spid;
/*
418 * Add the new probe to old_p->list. Fail if this is the
419 * second jprobe at the address - two jprobes can't coexist
 */
/* NOTE(review): the error-return path (when old_p already has a
 * break_handler) is missing from this dump; verify against full source. */
421 int add_new_kprobe (struct kprobe *old_p, struct kprobe *p)
423 if (p->break_handler)
425 if (old_p->break_handler)
/* A break_handler-carrying probe (jprobe) must be last in the list. */
427 list_add_tail_rcu (&p->list, &old_p->list);
428 old_p->break_handler = aggr_break_handler;
431 list_add_rcu (&p->list, &old_p->list);
/* First child with a post_handler upgrades the aggregate accordingly. */
432 if (p->post_handler && !old_p->post_handler)
433 old_p->post_handler = aggr_post_handler;
/*
438 * hlist_replace_rcu - replace old entry by new one
439 * @old : the element to be replaced
440 * @new : the new element to insert
 *
442 * The @old entry will be replaced with the @new entry atomically.
 */
/* NOTE(review): roughly half the body (new->next assignment, the
 * rcu_assign_pointer publish, the NULL guard) is missing from this dump. */
444 inline void dbi_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new)
446 struct hlist_node *next = old->next;
449 new->pprev = old->pprev;
452 new->next->pprev = &new->next;
/* Poison the removed node's back-pointer to catch stale use. */
455 old->pprev = LIST_POISON2;
/*
460 * Fill in the required fields of the "manager kprobe". Replace the
461 * earlier kprobe in the hlist with the manager kprobe
 */
464 void add_aggr_kprobe (struct kprobe *ap, struct kprobe *p)
467 //flush_insn_slot (ap);
/* The aggregate dispatches every event through the aggr_* fan-out handlers. */
469 ap->pre_handler = aggr_pre_handler;
470 ap->fault_handler = aggr_fault_handler;
472 ap->post_handler = aggr_post_handler;
473 if (p->break_handler)
474 ap->break_handler = aggr_break_handler;
/* Put the original probe on the aggregate's child list ... */
476 INIT_LIST_HEAD (&ap->list);
477 list_add_rcu (&p->list, &ap->list);
/* ... and swap the aggregate into the hash table in its place. */
479 dbi_hlist_replace_rcu (&p->hlist, &ap->hlist);
/*
483 * This is the second or subsequent kprobe at the address - handle
 * it by chaining onto an existing aggregate, or by promoting the
 * original probe into a freshly allocated aggregate.
 * NOTE(review): braces, the NULL-check on the allocation and the return
 * are missing from this dump; verify against the full source.
 */
486 int register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p)
490 DBPRINTF ("start\n");
492 DBPRINTF ("p = %p old_p = %p \n", p, old_p);
/* Already an aggregate: just append the new child. */
493 if (old_p->pre_handler == aggr_pre_handler)
495 DBPRINTF ("aggr_pre_handler \n");
497 copy_kprobe (old_p, p);
498 ret = add_new_kprobe (old_p, p);
502 DBPRINTF ("kzalloc\n");
/* Otherwise allocate the aggregate (kzalloc where available, else
 * kmalloc+memset — these are alternate #if branches). */
505 ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL);
507 ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL);
509 memset (ap, 0, sizeof (struct kprobe));
513 add_aggr_kprobe (ap, old_p);
515 DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
516 ret = add_new_kprobe (ap, p);
/*
 * __register_kprobe - core registration path shared by kprobes, jprobes
 * and kretprobes. Resolves symbol_name+offset to an address, handles
 * module refcounting for probes inside modules, chains onto an existing
 * probe at the same address, otherwise prepares the arch slot and inserts
 * into the hash table.
 * @called_from: return address of the public wrapper, used to detect
 * a module probing itself (no refcount taken in that case).
 * NOTE(review): this dump is missing many lines (braces, error returns,
 * the "out" label) — verify every path against the full source.
 */
522 int __register_kprobe (struct kprobe *p, unsigned long called_from, int atomic)
524 struct kprobe *old_p;
525 // struct module *probed_mod;
/*
528 * If we have a symbol_name argument look it up,
529 * and add it to the address. That way the addr
530 * field can either be global or relative to a symbol.
 */
536 p->addr = (unsigned int) kallsyms_search (p->symbol_name);
541 DBPRINTF ("p->addr = 0x%p\n", p->addr);
542 p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset);
543 DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
/*
545 if ((!kernel_text_address((unsigned long) p->addr)) ||
546 in_kprobes_functions((unsigned long) p->addr))
 */
549 #ifdef KPROBES_PROFILE
550 p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
551 p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
554 p->mod_refcounted = 0;
561 // Check are we probing a module
562 if ((probed_mod = module_text_address((unsigned long) p->addr))) {
563 struct module *calling_mod = module_text_address(called_from);
564 // We must allow modules to probe themself and
565 // in this case avoid incrementing the module refcount,
566 // so as to allow unloading of self probing modules.
568 if (calling_mod && (calling_mod != probed_mod)) {
569 if (unlikely(!try_module_get(probed_mod)))
571 p->mod_refcounted = 1;
577 // mutex_lock(&kprobe_mutex);
/* Existing probe at this address? Chain instead of re-arming. */
578 old_p = get_kprobe (p->addr, 0, NULL);
581 ret = register_aggr_kprobe (old_p, p);
583 atomic_inc (&kprobe_count);
/* Fresh address: build the out-of-line single-step slot. */
587 if ((ret = arch_prepare_kprobe (p)) != 0)
590 DBPRINTF ("before out ret = 0x%x\n", ret);
592 INIT_HLIST_NODE (&p->hlist);
593 hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
/*
595 if (atomic_add_return(1, &kprobe_count) == \
596 (ARCH_INACTIVE_KPROBE_COUNT + 1))
597 register_page_fault_notifier(&kprobe_page_fault_nb);
 */
602 // mutex_unlock(&kprobe_mutex);
/* On failure, give back the module reference taken above. */
604 if (ret && probed_mod)
605 module_put(probed_mod);
607 DBPRINTF ("out ret = 0x%x\n", ret);
/*
 * register_kprobe - public entry point for arming a kernel probe.
 * Captures the caller's return address (used by __register_kprobe to
 * detect a module probing itself) and delegates the real work.
 */
int register_kprobe (struct kprobe *p, int atomic)
{
	unsigned long caller = (unsigned long) __builtin_return_address (0);

	return __register_kprobe (p, caller, atomic);
}
/*
 * unregister_kprobe - remove a probe registered for kernel (@task == NULL)
 * or user space. Handles both the stand-alone case (disarm + unhash) and
 * the aggregate case (unlink one child, demote the aggregate's handlers
 * when the last child carrying them leaves).
 * NOTE(review): many lines are missing from this dump (braces, returns,
 * the valid_p check, the module_put path) — verify against full source.
 */
618 void unregister_kprobe (struct kprobe *p, struct task_struct *task, int atomic)
620 // struct module *mod;
621 struct kprobe *old_p, *list_p;
622 int cleanup_p, pid = 0;
624 // mutex_lock(&kprobe_mutex);
628 old_p = get_kprobe (p->addr, pid, NULL);
629 DBPRINTF ("unregister_kprobe p=%p old_p=%p", p, old_p);
/* Nothing registered at that address: bail out. */
630 if (unlikely (!old_p))
632 // mutex_unlock(&kprobe_mutex);
/* Confirm @p really hangs off the aggregate before touching it. */
637 list_for_each_entry_rcu (list_p, &old_p->list, list)
639 /* kprobe p is a valid probe */
641 // mutex_unlock(&kprobe_mutex);
645 DBPRINTF ("unregister_kprobe valid_p");
/* Stand-alone probe, or aggregate whose only child is @p: disarm fully. */
646 if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
647 (p->list.next == &old_p->list) && (p->list.prev == &old_p->list)))
649 /* Only probe on the hash list */
650 DBPRINTF ("unregister_kprobe disarm pid=%d", pid);
652 arch_disarm_uprobe (p, task);//vma, page, kaddr);
654 arch_disarm_kprobe (p);
655 hlist_del_rcu (&old_p->hlist);
660 list_del_rcu (&p->list);
663 DBPRINTF ("unregister_kprobe cleanup_p=%d", cleanup_p);
664 // mutex_unlock(&kprobe_mutex);
666 // synchronize_sched();
/* Release the module reference taken at registration, if any. */
668 if (p->mod_refcounted &&
669 (mod = module_text_address((unsigned long)p->addr)))
676 list_del_rcu (&p->list);
679 arch_remove_kprobe (p, task);
683 /// mutex_lock(&kprobe_mutex);
/* Aggregate survives: strip handlers that only @p contributed. */
684 if (p->break_handler)
685 old_p->break_handler = NULL;
688 list_for_each_entry_rcu (list_p, &old_p->list, list)
690 if (list_p->post_handler)
697 old_p->post_handler = NULL;
699 // mutex_unlock(&kprobe_mutex);
/*
702 Call unregister_page_fault_notifier()
703 if no probes are active
 */
705 // mutex_lock(&kprobe_mutex);
/*
706 if (atomic_add_return(-1, &kprobe_count) == \
707 ARCH_INACTIVE_KPROBE_COUNT)
708 unregister_page_fault_notifier(&kprobe_page_fault_nb);
 */
709 // mutex_unlock(&kprobe_mutex);
713 int register_jprobe (struct jprobe *jp, int atomic)
715 /* Todo: Verify probepoint is a function entry point */
716 jp->kp.pre_handler = setjmp_pre_handler;
717 jp->kp.break_handler = longjmp_break_handler;
719 return __register_kprobe (&jp->kp, (unsigned long) __builtin_return_address (0), atomic);
722 void unregister_jprobe (struct jprobe *jp, int atomic)
724 unregister_kprobe (&jp->kp, 0, atomic);
/*
728 * This kprobe pre_handler is registered with every kretprobe. When probe
729 * hits it will set up the return probe.
 */
/* NOTE(review): the function's return statement and at least one
 * conditional are missing from this dump. */
731 int pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs)
733 struct kretprobe *rp = container_of (p, struct kretprobe, kp);
734 unsigned long flags = 0;
735 DBPRINTF ("START\n");
737 /*TODO: consider to only swap the RA after the last pre_handler fired */
/* kretprobe_lock protects the instance lists touched by the arch hook. */
738 spin_lock_irqsave (&kretprobe_lock, flags);
740 __arch_prepare_kretprobe (rp, regs);
741 spin_unlock_irqrestore (&kretprobe_lock, flags);
/* The kretprobe installed on the scheduler, if any (special-cased below). */
746 struct kretprobe *sched_rp;
/*
 * register_kretprobe - install a return probe: pre-allocate maxactive
 * instances, hook pre_handler_kretprobe on function entry and register
 * the underlying kprobe.
 * NOTE(review): braces, the allocation-failure path and the return are
 * missing from this dump; verify against the full source.
 */
748 int register_kretprobe (struct kretprobe *rp, int atomic)
751 struct kretprobe_instance *inst;
/* The entry kprobe only sets up the return trampoline; no other handlers. */
755 rp->kp.pre_handler = pre_handler_kretprobe;
756 rp->kp.post_handler = NULL;
757 rp->kp.fault_handler = NULL;
758 rp->kp.break_handler = NULL;
762 /* Pre-allocate memory for max kretprobe instances */
/* NOTE(review): comparing an int-cast pointer against sched_addr (declared
 * `unsigned int *`) looks type-unclean — confirm intent in full source. */
763 if((unsigned int)rp->kp.addr == sched_addr)
764 rp->maxactive = 1000;//max (100, 2 * NR_CPUS);
765 else if (rp->maxactive <= 0)
767 #if 1//def CONFIG_PREEMPT
768 rp->maxactive = max (10, 2 * NR_CPUS);
770 rp->maxactive = NR_CPUS;
773 INIT_HLIST_HEAD (&rp->used_instances);
774 INIT_HLIST_HEAD (&rp->free_instances);
775 for (i = 0; i < rp->maxactive; i++)
777 inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
783 INIT_HLIST_NODE (&inst->uflist);
784 hlist_add_head (&inst->uflist, &rp->free_instances);
787 DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
789 /* Establish function entry probe point */
790 if ((ret = __register_kprobe (&rp->kp, (unsigned long) __builtin_return_address (0), atomic)) != 0)
793 DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
/* Remember the scheduler kretprobe in the sched_rp global (body missing). */
794 if((unsigned int)rp->kp.addr == sched_addr)
/*
 * unregister_kretprobe - remove the entry kprobe, then reap in-flight
 * instances under kretprobe_lock. (The free_rp_inst call and the
 * sched_rp reset body are missing from this dump.)
 */
800 void unregister_kretprobe (struct kretprobe *rp, int atomic)
803 struct kretprobe_instance *ri;
805 unregister_kprobe (&rp->kp, 0, atomic);
807 if((unsigned int)rp->kp.addr == sched_addr)
811 spin_lock_irqsave (&kretprobe_lock, flags);
/* Orphan every still-used instance so late returns cannot touch @rp. */
812 while ((ri = get_used_rp_inst (rp)) != NULL)
815 hlist_del (&ri->uflist);
817 spin_unlock_irqrestore (&kretprobe_lock, flags);
/*
 * clone_kretprobe - duplicate @rp and register the copy as an additional
 * child on the aggregate probe already present at the same address.
 * NOTE(review): the NULL-check branches and the return statement are
 * missing from this dump.
 */
821 struct kretprobe * clone_kretprobe (struct kretprobe *rp)
823 struct kprobe *old_p;
824 struct kretprobe *clone = NULL;
827 clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL);
830 DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);
/* Shallow copy, then reset handlers to the standard kretprobe set. */
833 memcpy (clone, rp, sizeof (struct kretprobe));
834 clone->kp.pre_handler = pre_handler_kretprobe;
835 clone->kp.post_handler = NULL;
836 clone->kp.fault_handler = NULL;
837 clone->kp.break_handler = NULL;
838 old_p = get_kprobe (rp->kp.addr, rp->kp.tgid, NULL);
841 ret = register_aggr_kprobe (old_p, &clone->kp);
847 atomic_inc (&kprobe_count);
/*
 * init_kprobes - module init: set up hash tables, per-bucket uprobe
 * instruction slots, the probe counter, and the arch back end.
 * (The return statement is missing from this dump.)
 */
854 int __init init_kprobes (void)
858 /* FIXME allocate the probe table, currently defined statically */
859 /* initialize all list heads */
860 for (i = 0; i < KPROBE_TABLE_SIZE; i++)
862 INIT_HLIST_HEAD (&kprobe_table[i]);
863 INIT_HLIST_HEAD (&kretprobe_inst_table[i]);
865 init_uprobes_insn_slots(i);
867 atomic_set (&kprobe_count, 0);
869 err = arch_init_kprobes ();
871 DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);
876 void __exit exit_kprobes (void)
878 arch_exit_kprobes ();
/* Module entry/exit hooks and the symbols exported to other modules. */
881 module_init (init_kprobes);
882 module_exit (exit_kprobes);
884 EXPORT_SYMBOL_GPL (register_kprobe);
885 EXPORT_SYMBOL_GPL (unregister_kprobe);
886 EXPORT_SYMBOL_GPL (register_jprobe);
887 EXPORT_SYMBOL_GPL (unregister_jprobe);
888 EXPORT_SYMBOL_GPL (jprobe_return);
889 EXPORT_SYMBOL_GPL (register_kretprobe);
890 EXPORT_SYMBOL_GPL (unregister_kretprobe);
892 MODULE_LICENSE ("Dual BSD/GPL");