// src_kprobes.c


#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#include <linux/config.h>
#endif

#include <asm/types.h>

#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
//#include <linux/freezer.h>
#include <linux/seq_file.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/highmem.h>      // kmap_atomic, kunmap_atomic, copy_from_user_page, copy_to_user_page
#include <linux/pagemap.h>      // page_cache_release
#include <linux/vmalloc.h>      // vmalloc, vfree
#if defined(CONFIG_X86)
#include <linux/kdebug.h>       // register_die_notifier, unregister_die_notifier
#endif
#include <linux/hugetlb.h>      // follow_hugetlb_page, is_vm_hugetlb_page

#include "kprobes.h"

//#define arch_remove_kprobe(p) do { } while (0)

#ifdef _DEBUG
extern int nCount;
#endif

/*
static spinlock_t die_notifier_lock = SPIN_LOCK_UNLOCKED;

int src_register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;

        spin_lock_irqsave(&die_notifier_lock, flags);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
        err = atomic_notifier_chain_register(&panic_notifier_list, nb);
#else
        err = notifier_chain_register(&panic_notifier_list, nb);
#endif
        spin_unlock_irqrestore(&die_notifier_lock, flags);

        return err;
}
*/
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void
src_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        smp_wmb ();
        if (next)
                new->next->pprev = &new->next;
        if (new->pprev)
                *new->pprev = new;
        old->pprev = LIST_POISON2;
}
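
/*
 * A minimal usage sketch (hypothetical, not part of this module): replacing
 * one element of an RCU-protected hlist while readers may be traversing it.
 * The struct and function names below are illustrative only.
 */
#if 0
struct example_item
{
        int payload;
        struct hlist_node node;
};

static void
example_replace (struct example_item *old_item, struct example_item *new_item)
{
        /* Concurrent RCU readers see either the old or the new element,
         * never a torn list. */
        src_hlist_replace_rcu (&old_item->node, &new_item->node);
        /* Wait for all pre-existing readers before freeing the old one. */
        synchronize_rcu ();
        kfree (old_item);
}
#endif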

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
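
/*
 * Usage sketch (hypothetical): resolving a kernel symbol to a probe address.
 * The symbol name is only an example; kallsyms_lookup_name() returns 0 for
 * unknown symbols, so the result must be checked by the caller.
 */
#if 0
static kprobe_opcode_t *
example_lookup_symbol (void)
{
        kprobe_opcode_t *addr;
        kprobe_lookup_name ("do_exit", addr);
        return addr;    /* NULL if the symbol could not be resolved */
}
#endif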

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static struct hlist_head uprobe_insn_slot_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

//DEFINE_MUTEX(kprobe_mutex);           /* Protects kprobe_table */
DEFINE_SPINLOCK (kretprobe_lock);       /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL;
unsigned long handled_exceptions;

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void
set_kprobe_instance (struct kprobe *kp)
{
        __get_cpu_var (kprobe_instance) = kp;
}

static inline void
reset_kprobe_instance (void)
{
        __get_cpu_var (kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *
get_kprobe (void *addr, int tgid, struct task_struct *ctask)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *retVal = NULL;
        int ret = 0, uprobe_found;
        struct page *page = NULL, *tpage = NULL;
        struct vm_area_struct *vma = NULL;
        struct task_struct *task = NULL;
        void *paddr = NULL;


        if (ctask && ctask->active_mm)
        {
                ret = get_user_pages_atomic (ctask, ctask->active_mm, (unsigned long) addr, 1, 0, 0, &tpage, NULL);
                if (ret <= 0)
                        DBPRINTF ("get_user_pages for task %d at %p failed!", current->pid, addr);
                else
                {
                        paddr = page_address (tpage);
                        page_cache_release (tpage);
                }
        }
        //else
        //      DBPRINTF("task %d has no mm!", ctask->pid);

        //TODO: test - two processes invoke an instrumented function
        head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu (p, node, head, hlist)
        {
                //if looking for a kernel probe and this is a kernel probe with the same addr OR
                //if looking for a user space probe and this is a user space probe with the same addr and pid
                DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->addr, tgid, p->tgid);
                if (p->addr == addr)
                {
                        uprobe_found = 0;
                        if (tgid == p->tgid)
                                uprobe_found = 1;
                        if (!tgid || uprobe_found)
                        {
                                retVal = p;
                                if (tgid)
                                        DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
                                else
                                        DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
                                break;
                        }
                }
                else if (tgid != p->tgid)
                {
                        // if looking for a user space probe and this is a user space probe
                        // with another addr and pid but with the same offset within the page,
                        // it could be the same probe (with an address from another address space);
                        // we should handle it as a usual probe but without notification to user space
                        if (paddr && tgid && (((unsigned long) addr & ~PAGE_MASK) == ((unsigned long) p->addr & ~PAGE_MASK))
                            && p->tgid)
                        {
                                DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d. possibly for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
                                // this probe has the same offset in the page;
                                // look in the probes for the other pids
                                // get page for user space probe addr
                                rcu_read_lock ();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
                                task = find_task_by_pid (p->tgid);
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) */
                                task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
#endif
                                if (task)
                                        get_task_struct (task);
                                rcu_read_unlock ();
                                if (!task)
                                {
                                        DBPRINTF ("task for pid %d not found! Dead probe?", p->tgid);
                                        continue;
                                }
                                if (task->active_mm)
                                {
                                        if (page_present (task->active_mm, (unsigned long) p->addr))
                                        {
                                                ret = get_user_pages_atomic (task, task->active_mm, (unsigned long) p->addr, 1, 0, 0, &page, &vma);
                                                if (ret <= 0)
                                                        DBPRINTF ("get_user_pages for task %d at %p failed!", p->tgid, p->addr);
                                        }
                                        else
                                                ret = -1;
                                }
                                else
                                {
                                        DBPRINTF ("task %d has no mm!", task->pid);
                                        ret = -1;
                                }
                                put_task_struct (task);
                                if (ret <= 0)
                                        continue;
                                if (paddr == page_address (page))
                                {
                                        retVal = p;     // we found the probe in another process' address space
                                        DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
                                        panic ("user space probe from another process");
                                }
                                page_cache_release (page);
                                if (retVal)
                                        break;
                        }
                }
        }

        DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
        return retVal;
}
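
/*
 * Usage sketch (hypothetical): checking whether a kernel address already
 * carries a probe. A tgid of 0 selects kernel probes; a non-zero tgid would
 * select user space probes of that process.
 */
#if 0
static int
example_is_kernel_addr_probed (void *addr)
{
        return get_kprobe (addr, 0, NULL) != NULL;
}
#endif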

struct kprobe __kprobes *
get_kprobe_by_insn_slot (void *addr, int tgid, struct task_struct *ctask)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *retVal = NULL;
        int uprobe_found;

        //TODO: test - two processes invoke an instrumented function
        head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu (p, node, head, is_hlist)
        {
                //if looking for a kernel probe and this is a kernel probe with the same addr OR
                //if looking for a user space probe and this is a user space probe with the same addr and pid
                DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->ainsn.insn, tgid, p->tgid);
                if (p->ainsn.insn == addr)
                {
                        uprobe_found = 0;
                        if (tgid == p->tgid)
                                uprobe_found = 1;
                        if (!tgid || uprobe_found)
                        {
                                retVal = p;
                                if (tgid)
                                        DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
                                else
                                        DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
                                break;
                        }
                }
        }

        DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
        return retVal;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes
aggr_pre_handler (struct kprobe *p, struct pt_regs *regs        /*,
                                                                   struct vm_area_struct **vma,
                                                                   struct page **page, unsigned long **kaddr */ )
{
        struct kprobe *kp;
        int ret;

        list_for_each_entry_rcu (kp, &p->list, list)
        {
                if (kp->pre_handler)
                {
                        set_kprobe_instance (kp);
                        ret = kp->pre_handler (kp, regs);
                        if (ret)
                                return ret;
                }
                reset_kprobe_instance ();
        }
        return 0;
}

static void __kprobes
aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu (kp, &p->list, list)
        {
                if (kp->post_handler)
                {
                        set_kprobe_instance (kp);
                        kp->post_handler (kp, regs, flags);
                        reset_kprobe_instance ();
                }
        }
        return;
}

#if 1
static int __kprobes
aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = __get_cpu_var (kprobe_instance);

        /*
         * if we faulted "during" the execution of a user specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler)
        {
                if (cur->fault_handler (cur, regs, trapnr))
                        return 1;
        }
        return 0;
}
#endif

static int __kprobes
aggr_break_handler (struct kprobe *p, struct pt_regs *regs      /*,
                                                                   struct vm_area_struct **vma,
                                                                   struct page **page, unsigned long **kaddr */ )
{
        struct kprobe *cur = __get_cpu_var (kprobe_instance);
        int ret = 0;
        DBPRINTF ("cur = 0x%p\n", cur);
        if (cur)
                DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);

        if (cur && cur->break_handler)
        {
                if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
                        ret = 1;
        }
        reset_kprobe_instance ();
        return ret;
}
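
/*
 * Usage sketch (hypothetical): when two kprobes are registered at the same
 * address, the second registration is routed through register_aggr_kprobe()
 * and both pre_handlers are chained via aggr_pre_handler() above. Symbol and
 * handler names are illustrative only.
 */
#if 0
static int example_pre1 (struct kprobe *p, struct pt_regs *regs) { return 0; }
static int example_pre2 (struct kprobe *p, struct pt_regs *regs) { return 0; }

static struct kprobe example_kp1 = { .symbol_name = "do_exit", .pre_handler = example_pre1 };
static struct kprobe example_kp2 = { .symbol_name = "do_exit", .pre_handler = example_pre2 };

static int
example_register_pair (void)
{
        int ret = register_kprobe (&example_kp1, 0);
        if (ret)
                return ret;
        /* same address: this one gets attached to an aggregate probe */
        ret = register_kprobe (&example_kp2, 0);
        if (ret)
                unregister_kprobe (&example_kp1, 0, 0);
        return ret;
}
#endif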

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes
kprobes_inc_nmissed_count (struct kprobe *p)
{
        struct kprobe *kp;
        if (p->pre_handler != aggr_pre_handler)
        {
                p->nmissed++;
        }
        else
        {
                list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++;
        }
        return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *
get_free_rp_inst (struct kretprobe *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;
        hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
                return ri;
        return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *
get_used_rp_inst (struct kretprobe *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;
        hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
        return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes
add_rp_inst (struct kretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del (&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE (&ri->hlist);
        hlist_add_head (&ri->hlist, &kretprobe_inst_table[hash_ptr (ri->task, KPROBE_HASH_BITS)]);

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE (&ri->uflist);
        hlist_add_head (&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes
recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
{
        /* remove rp inst off the rprobe_inst_table */
        hlist_del (&ri->hlist);
        if (ri->rp)
        {
                /* remove rp inst off the used list */
                hlist_del (&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE (&ri->uflist);
                hlist_add_head (&ri->uflist, &ri->rp->free_instances);
        }
        else
                /* Unregistering */
                hlist_add_head (&ri->hlist, head);
}

struct hlist_head __kprobes *
kretprobe_inst_table_head (struct task_struct *tsk)
{
        return &kretprobe_inst_table[hash_ptr (tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
/*void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;

        INIT_HLIST_HEAD(&empty_rp);
        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(tk);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}*/

static inline void
free_rp_inst (struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        while ((ri = get_free_rp_inst (rp)) != NULL)
        {
                hlist_del (&ri->uflist);
                kfree (ri);
        }
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void
copy_kprobe (struct kprobe *old_p, struct kprobe *p)
{
        memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t));
        memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn));
        p->tgid = old_p->tgid;
        p->ss_addr = old_p->ss_addr;
        //p->spid = old_p->spid;
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes
add_new_kprobe (struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler)
        {
                if (old_p->break_handler)
                        return -EEXIST;
                list_add_tail_rcu (&p->list, &old_p->list);
                old_p->break_handler = aggr_break_handler;
        }
        else
                list_add_rcu (&p->list, &old_p->list);
        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_handler;
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void
add_aggr_kprobe (struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe (p, ap);
        flush_insn_slot (ap);
        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        if (p->post_handler)
                ap->post_handler = aggr_post_handler;
        if (p->break_handler)
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD (&ap->list);
        list_add_rcu (&p->list, &ap->list);

        src_hlist_replace_rcu (&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes
register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;
        DBPRINTF ("start\n");

        DBPRINTF ("p = %p old_p = %p \n", p, old_p);
        if (old_p->pre_handler == aggr_pre_handler)
        {
                DBPRINTF ("aggr_pre_handler \n");

                copy_kprobe (old_p, p);
                ret = add_new_kprobe (old_p, p);
        }
        else
        {
                DBPRINTF ("kzalloc\n");

#ifdef kzalloc
                ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL);
#else
                ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL);
                if (ap)
                        memset (ap, 0, sizeof (struct kprobe));
#endif
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe (ap, old_p);
                copy_kprobe (ap, p);
                DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
                ret = add_new_kprobe (ap, p);
        }
        return ret;
}

static int __kprobes
__register_kprobe (struct kprobe *p, unsigned long called_from, int atomic)
{
        struct kprobe *old_p;
//      struct module *probed_mod;
        int ret = 0;
        /*
         * If we have a symbol_name argument look it up,
         * and add it to the address.  That way the addr
         * field can either be global or relative to a symbol.
         */
        if (p->symbol_name)
        {
                if (p->addr)
                        return -EINVAL;
                kprobe_lookup_name (p->symbol_name, p->addr);
        }

        if (!p->addr)
                return -EINVAL;
        DBPRINTF ("p->addr = 0x%p\n", p->addr);
        p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset);
        DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);

/*      if ((!kernel_text_address((unsigned long) p->addr)) ||
                in_kprobes_functions((unsigned long) p->addr))
                return -EINVAL;*/

#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif
        p->mod_refcounted = 0;
        //p->proc_prio = 0;
        //p->proc_sched = 0;
        //p->spid = -1;
        //p->irq = 0;
        //p->task_flags = 0;
/*
        // Check whether we are probing a module
        if ((probed_mod = module_text_address((unsigned long) p->addr))) {
                struct module *calling_mod = module_text_address(called_from);
                // We must allow modules to probe themselves and
                // in this case avoid incrementing the module refcount,
                // so as to allow unloading of self probing modules.
                //
                if (calling_mod && (calling_mod != probed_mod)) {
                        if (unlikely(!try_module_get(probed_mod)))
                                return -EINVAL;
                        p->mod_refcounted = 1;
                } else
                        probed_mod = NULL;
        }
*/
        p->nmissed = 0;
//      mutex_lock(&kprobe_mutex);
        old_p = get_kprobe (p->addr, 0, NULL);
        if (old_p)
        {
                ret = register_aggr_kprobe (old_p, p);
                if (!ret)
                        atomic_inc (&kprobe_count);
                goto out;
        }

        if ((ret = arch_prepare_kprobe (p)) != 0)
                goto out;

        DBPRINTF ("before out ret = 0x%x\n", ret);

        INIT_HLIST_NODE (&p->hlist);
        hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);

/*      if (atomic_add_return(1, &kprobe_count) == \
                                (ARCH_INACTIVE_KPROBE_COUNT + 1))
                register_page_fault_notifier(&kprobe_page_fault_nb);*/

        arch_arm_kprobe (p);

      out:
//      mutex_unlock(&kprobe_mutex);
/*
        if (ret && probed_mod)
                module_put(probed_mod);
*/
        DBPRINTF ("out ret = 0x%x\n", ret);

        return ret;
}

static int __kprobes
__register_uprobe (struct kprobe *p, struct task_struct *task, int atomic, unsigned long called_from)
{
        int ret = 0;
        struct kprobe *old_p;

        if (!p->addr)
                return -EINVAL;

        DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);

        p->mod_refcounted = 0;
        p->nmissed = 0;
#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif

        // get the first item
        old_p = get_kprobe (p->addr, p->tgid, NULL);
        if (old_p)
        {
                ret = register_aggr_kprobe (old_p, p);
                if (!ret)
                        atomic_inc (&kprobe_count);
                goto out;
        }
        if ((ret = arch_prepare_uprobe (p, task, atomic)) != 0)
        {
                goto out;
        }

        DBPRINTF ("before out ret = 0x%x\n", ret);

        INIT_HLIST_NODE (&p->hlist);
        hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);

        INIT_HLIST_NODE (&p->is_hlist);
        hlist_add_head_rcu (&p->is_hlist, &uprobe_insn_slot_table[hash_ptr (p->ainsn.insn, KPROBE_HASH_BITS)]);

        arch_arm_uprobe (p, task);
out:
        DBPRINTF ("out ret = 0x%x\n", ret);

        return ret;
}

void __kprobes
unregister_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
{
        unregister_kprobe (p, task, atomic);
}

int __kprobes
register_kprobe (struct kprobe *p, int atomic)
{
        return __register_kprobe (p, (unsigned long) __builtin_return_address (0), atomic);
}
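
/*
 * Usage sketch (hypothetical): a minimal kernel probe registered through this
 * module's API, which differs from mainline kprobes by the extra 'atomic'
 * argument. Symbol and handler names are illustrative only.
 */
#if 0
static int
example_pre_handler (struct kprobe *p, struct pt_regs *regs)
{
        /* return 0 to let the probed instruction be single-stepped */
        return 0;
}

static struct kprobe example_kp = {
        .symbol_name = "do_exit",
        .pre_handler = example_pre_handler,
};

/* register_kprobe (&example_kp, 0); ... unregister_kprobe (&example_kp, 0, 0); */
#endif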

void __kprobes
unregister_kprobe (struct kprobe *p, struct task_struct *task, int atomic)
{
//      struct module *mod;
        struct kprobe *old_p, *list_p;
        int cleanup_p, pid = 0;

//      mutex_lock(&kprobe_mutex);

        pid = p->tgid;

        old_p = get_kprobe (p->addr, pid, NULL);
        DBPRINTF ("unregister_kprobe p=%p old_p=%p", p, old_p);
        if (unlikely (!old_p))
        {
//              mutex_unlock(&kprobe_mutex);
                return;
        }
        if (p != old_p)
        {
                list_for_each_entry_rcu (list_p, &old_p->list, list)
                        if (list_p == p)
                                /* kprobe p is a valid probe */
                                goto valid_p;
//              mutex_unlock(&kprobe_mutex);
                return;
        }
valid_p:
        DBPRINTF ("unregister_kprobe valid_p");
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
                (p->list.next == &old_p->list) && (p->list.prev == &old_p->list)))
        {
                /* Only probe on the hash list */
                DBPRINTF ("unregister_kprobe disarm pid=%d", pid);
                if (pid)
                        arch_disarm_uprobe (p, task);//vma, page, kaddr);
                else
                        arch_disarm_kprobe (p);
                hlist_del_rcu (&old_p->hlist);
                cleanup_p = 1;
        }
        else
        {
                list_del_rcu (&p->list);
                cleanup_p = 0;
        }
        DBPRINTF ("unregister_kprobe cleanup_p=%d", cleanup_p);
//      mutex_unlock(&kprobe_mutex);

//      synchronize_sched();
/*
        if (p->mod_refcounted &&
            (mod = module_text_address((unsigned long)p->addr)))
                module_put(mod);
*/
        if (cleanup_p)
        {
                if (p != old_p)
                {
                        list_del_rcu (&p->list);
                        kfree (old_p);
                }
                arch_remove_kprobe (p, task);
        }
        else
        {
///             mutex_lock(&kprobe_mutex);
                if (p->break_handler)
                        old_p->break_handler = NULL;
                if (p->post_handler)
                {
                        list_for_each_entry_rcu (list_p, &old_p->list, list)
                        {
                                if (list_p->post_handler)
                                {
                                        cleanup_p = 2;
                                        break;
                                }
                        }
                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
//              mutex_unlock(&kprobe_mutex);
        }

        /* Call unregister_page_fault_notifier()
         * if no probes are active
         */
//      mutex_lock(&kprobe_mutex);
/*      if (atomic_add_return(-1, &kprobe_count) == \
                                ARCH_INACTIVE_KPROBE_COUNT)
                unregister_page_fault_notifier(&kprobe_page_fault_nb);*/
//      mutex_unlock(&kprobe_mutex);
        return;
}

int __kprobes
register_ujprobe (struct task_struct *task, struct mm_struct *mm, struct jprobe *jp, int atomic)
{
        int ret = 0;
#ifdef _DEBUG
        gSilent = 0;
#endif
        /* Todo: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        ret = __register_uprobe (&jp->kp, task, atomic,
                                    (unsigned long) __builtin_return_address (0));

#ifdef _DEBUG
        gSilent = 1;
#endif
        return ret;
}

void __kprobes
unregister_ujprobe (struct task_struct *task, struct jprobe *jp, int atomic)
{
        unregister_uprobe (&jp->kp, task, atomic);
}

int __kprobes
register_jprobe (struct jprobe *jp, int atomic)
{
        /* Todo: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        return __register_kprobe (&jp->kp, (unsigned long) __builtin_return_address (0), atomic);
}
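
/*
 * Usage sketch (hypothetical): a jprobe handler mirrors the probed function's
 * prototype and must finish with jprobe_return(). This assumes the jprobe
 * layout matches mainline (an 'entry' callback beside the embedded kprobe);
 * the probed symbol is illustrative only.
 */
#if 0
static long
example_jdo_fork (unsigned long clone_flags, unsigned long stack_start,
                  struct pt_regs *regs, unsigned long stack_size,
                  int __user *parent_tidptr, int __user *child_tidptr)
{
        /* inspect the arguments here; never return normally */
        jprobe_return ();
        return 0;
}

static struct jprobe example_jp = {
        .entry = (kprobe_opcode_t *) example_jdo_fork,
        .kp.symbol_name = "do_fork",
};

/* register_jprobe (&example_jp, 0); ... unregister_jprobe (&example_jp, 0); */
#endif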

void __kprobes
unregister_jprobe (struct jprobe *jp, int atomic)
{
        unregister_kprobe (&jp->kp, 0, atomic);
}

/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes
pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs   /*, struct vm_area_struct **vma,
                                                                   struct page **page, unsigned long **kaddr */ )
{
        struct kretprobe *rp = container_of (p, struct kretprobe, kp);
        unsigned long flags = 0;
        DBPRINTF ("START\n");

        /* TODO: consider swapping the RA only after the last pre_handler has fired */
        spin_lock_irqsave (&kretprobe_lock, flags);
        if (!rp->disarm)
                __arch_prepare_kretprobe (rp, regs);
        spin_unlock_irqrestore (&kretprobe_lock, flags);
        DBPRINTF ("END\n");
        return 0;
}

struct kretprobe *sched_rp;

int __kprobes
register_kretprobe (struct kretprobe *rp, int atomic)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        DBPRINTF ("START");

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        rp->disarm = 0;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->kp.addr == sched_addr)
                rp->maxactive = 1000;//max (100, 2 * NR_CPUS);
        else if (rp->maxactive <= 0)
        {
#if 1//def CONFIG_PREEMPT
                rp->maxactive = max (10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD (&rp->used_instances);
        INIT_HLIST_HEAD (&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++)
        {
                inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
                if (inst == NULL)
                {
                        free_rp_inst (rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE (&inst->uflist);
                hlist_add_head (&inst->uflist, &rp->free_instances);
        }

        DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
        rp->nmissed = 0;
        /* Establish function entry probe point */
        if ((ret = __register_kprobe (&rp->kp, (unsigned long) __builtin_return_address (0), atomic)) != 0)
                free_rp_inst (rp);

        DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
        if (rp->kp.addr == sched_addr)
                sched_rp = rp;

        return ret;
}
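
/*
 * Usage sketch (hypothetical): a return probe that fires when the probed
 * function returns. This assumes the kretprobe carries a mainline-style
 * 'handler' callback invoked from the return trampoline; the symbol name
 * and maxactive value are illustrative only.
 */
#if 0
static int
example_ret_handler (struct kretprobe_instance *ri, struct pt_regs *regs)
{
        /* on most architectures the return value is in regs (e.g. r0/eax) */
        return 0;
}

static struct kretprobe example_rp = {
        .handler = example_ret_handler,
        .maxactive = 20,
        .kp.symbol_name = "do_fork",
};

/* register_kretprobe (&example_rp, 0); ... unregister_kretprobe (&example_rp, 0); */
#endif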

void __kprobes
unregister_kretprobe (struct kretprobe *rp, int atomic)
{
        unsigned long flags;
        struct kretprobe_instance *ri;

        //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
        //               *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
        unregister_kprobe (&rp->kp, 0, atomic);

        if (rp->kp.addr == sched_addr)
                sched_rp = NULL;

        //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
        //               *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
        /* No race here */
        spin_lock_irqsave (&kretprobe_lock, flags);
        while ((ri = get_used_rp_inst (rp)) != NULL)
        {
                ri->rp = NULL;
                hlist_del (&ri->uflist);
        }
        spin_unlock_irqrestore (&kretprobe_lock, flags);
        free_rp_inst (rp);
}

int __kprobes
register_uretprobe (struct task_struct *task, struct mm_struct *mm, struct kretprobe *rp, int atomic)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        /*struct page *pages[2] = {0, 0};
           struct vm_area_struct *vmas[2] = {0, 0};
           unsigned long *kaddrs[2] = {0, 0}; */
        int i;
#ifdef _DEBUG
        gSilent = 0;
#endif

        DBPRINTF ("START\n");

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        rp->disarm = 0;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0)
        {
#if 1//def CONFIG_PREEMPT
                rp->maxactive = max (10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD (&rp->used_instances);
        INIT_HLIST_HEAD (&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++)
        {
                inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
                if (inst == NULL)
                {
                        free_rp_inst (rp);
                        ret = -ENOMEM;
                        goto out;
                }
                INIT_HLIST_NODE (&inst->uflist);
                hlist_add_head (&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;
#if 0
        if (atomic)
                ret = get_user_pages_atomic (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
        else
                ret = get_user_pages (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
        if (ret <= 0)
        {
                DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
                ret = -EFAULT;
                goto out;
        }
        if (atomic)
                kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        else
                kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        // if the 2nd instruction is on the 2nd page
        if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
        {
                if (atomic)
                        ret = get_user_pages_atomic (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
                else
                        ret = get_user_pages (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
                if (ret <= 0)
                {
                        DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
                        ret = -EFAULT;
                        goto out;
                }
                if (atomic)
                        kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
                else
                        kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
        }
        else
        {
                // the 2nd instruction is on the 1st page too
                vmas[1] = vmas[0];
                pages[1] = pages[0];
                kaddrs[1] = kaddrs[0] + 1;
        }
#endif
        /* Establish function exit probe point */
        if ((ret = arch_prepare_uretprobe (rp, task/*vmas, pages, kaddrs */ )) != 0)
                goto out;
        /* Establish function entry probe point */
        if ((ret = __register_uprobe (&rp->kp, task, atomic,
                                         (unsigned long) __builtin_return_address (0))) != 0)
        {
                free_rp_inst (rp);
                goto out;
        }

        arch_arm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
#if 0
        if (atomic)
                set_page_dirty (pages[1]);
        else
                set_page_dirty_lock (pages[1]);
#endif
      out:
#if 0
        if (pages[0])
        {
                if (kaddrs[0])
                {
                        if (atomic)
                                kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
                        else
                                kunmap (pages[0]);
                }
                page_cache_release (pages[0]);
        }
        if ((pages[0] != pages[1]))
        {
                if (pages[1])
                {
                        if (kaddrs[1])
                        {
                                if (atomic)
                                        kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
                                else
                                        kunmap (pages[1]);
                        }
                        page_cache_release (pages[1]);
                }
        }
        /*else if( (pages[0] != pages[2]) ){
           if(pages[2]){
           if(kaddrs[2]) {
           if (atomic) kunmap_atomic(kaddrs[2], KM_USER1);
           else        kunmap(pages[2]);
           }
           page_cache_release(pages[2]);
           }
           } */
#endif

#ifdef _DEBUG
        gSilent = 1;
#endif
        return ret;
}

static struct kretprobe *__kprobes
clone_kretprobe (struct kretprobe *rp)
{
        struct kprobe *old_p;
        struct kretprobe *clone = NULL;
        int ret;

        clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL);
        if (!clone)
        {
                DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);
                return NULL;
        }
        memcpy (clone, rp, sizeof (struct kretprobe));
        clone->kp.pre_handler = pre_handler_kretprobe;
        clone->kp.post_handler = NULL;
        clone->kp.fault_handler = NULL;
        clone->kp.break_handler = NULL;
        old_p = get_kprobe (rp->kp.addr, rp->kp.tgid, NULL);
        if (old_p)
        {
                ret = register_aggr_kprobe (old_p, &clone->kp);
                if (ret)
                {
                        kfree (clone);
                        return NULL;
                }
                atomic_inc (&kprobe_count);
        }

        return clone;
}

void __kprobes
unregister_uretprobe (struct task_struct *task, struct kretprobe *rp, int atomic)
{
        //int ret = 0;
        unsigned long flags;
        struct kretprobe_instance *ri;
        struct kretprobe *rp2 = NULL;
        /*struct mm_struct *mm;
           struct page *pages[2] = {0, 0};
           struct vm_area_struct *vmas[2] = {0, 0};
           unsigned long *kaddrs[2] = {0, 0}; */

#ifdef _DEBUG
        gSilent = 0;
#endif
#if 0
        mm = atomic ? task->active_mm : get_task_mm (task);
        if (!mm)
        {
                DBPRINTF ("task %u has no mm!", task->pid);
#ifdef _DEBUG
                gSilent = 1;
#endif
                return;
        }
        if (atomic)
                ret = get_user_pages_atomic (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
        else
        {
                down_read (&mm->mmap_sem);
                ret = get_user_pages (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
        }
        if (ret <= 0)
        {
                DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
                goto out;
        }
        if (atomic)
                kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        else
                kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
        {
                if (atomic)
                        ret = get_user_pages_atomic (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
                else
                        ret = get_user_pages (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
                if (ret <= 0)
                {
                        DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
                        goto out;
                }
                if (atomic)
                        kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
                else
                        kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
        }
        else
        {
                vmas[1] = vmas[0];
                pages[1] = pages[0];
                kaddrs[1] = kaddrs[0] + 1;
        }

        /* No race here */
        DBPRINTF ("unregister_uretprobe1 addr %p [%lx %lx]", rp->kp.addr, *kaddrs[0], *kaddrs[1]);
#endif
        spin_lock_irqsave (&kretprobe_lock, flags);
        if (hlist_empty (&rp->used_instances))
        {
                // if there are no used retprobe instances (i.e. the function was not entered) - disarm the retprobe
                arch_disarm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
#if 0
                if (atomic)
                        set_page_dirty (pages[1]);
                else
                        set_page_dirty_lock (pages[1]);
#endif
        }
        else
        {
                rp2 = clone_kretprobe (rp);
                if (!rp2)
                        DBPRINTF ("unregister_uretprobe addr %p: failed to clone retprobe!", rp->kp.addr);
                else
                {
                        DBPRINTF ("initiating deferred retprobe deletion addr %p", rp->kp.addr);
                        printk ("initiating deferred retprobe deletion addr %p\n", rp->kp.addr);
                        rp2->disarm = 1;
                }
        }

        while ((ri = get_used_rp_inst (rp)) != NULL)
        {
                ri->rp = NULL;
                ri->rp2 = rp2;
                hlist_del (&ri->uflist);
        }
        spin_unlock_irqrestore (&kretprobe_lock, flags);
        free_rp_inst (rp);

        unregister_uprobe (&rp->kp, task, atomic);
        //DBPRINTF("unregister_uretprobe3 addr %p [%lx %lx]",
        //              rp->kp.addr, *kaddrs[0], *kaddrs[1]);
#if 0
      out:
        if (pages[0])
        {
                if (kaddrs[0])
                {
                        if (atomic)
                                kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
                        else
                                kunmap (pages[0]);
                }
                page_cache_release (pages[0]);
        }
        if (pages[1] && (pages[0] != pages[1]))
        {
                if (kaddrs[1])
                {
                        if (atomic)
                                kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
                        else
                                kunmap (pages[1]);
                }
                page_cache_release (pages[1]);
        }
        if (!atomic)
        {
                up_read (&mm->mmap_sem);
                mmput (mm);
        }
#endif
#ifdef _DEBUG
        gSilent = 1;
#endif
}

void __kprobes
unregister_all_uprobes (struct task_struct *task, int atomic)
{
        struct hlist_head *head;
        struct hlist_node *node, *tnode;
        struct kprobe *p;
        int i;

        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_safe (p, node, tnode, head, hlist) {
                        if (p->tgid == task->tgid) {
                                printk ("unregister_all_uprobes: delete uprobe at %pf for %s/%d\n", p->addr, task->comm, task->pid);
                                unregister_uprobe (p, task, atomic);
                        }
                }
        }
        purge_garbage_uslots (task, atomic);
}
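
/*
 * Usage sketch (hypothetical): tearing down every user space probe of an
 * exiting process, e.g. from a handler that intercepts do_exit in the
 * tracing engine built on top of this file.
 */
#if 0
static void
example_on_task_exit (struct task_struct *task)
{
        /* atomic = 1: we may be running in a context that must not sleep */
        unregister_all_uprobes (task, 1);
}
#endif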


#define GUP_FLAGS_WRITE                  0x1
#define GUP_FLAGS_FORCE                  0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
#define GUP_FLAGS_IGNORE_SIGKILL         0x8


static inline int use_zero_page(struct vm_area_struct *vma)
{
        /*
         * We don't want to optimize FOLL_ANON for make_pages_present()
         * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
         * we want to get the page from the page tables to make sure
         * that we serialize and update with any other user of that
         * mapping.
         */
        if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
                return 0;
        /*
         * And if we have a fault routine, it's not an anonymous region.
         */
        return !vma->vm_ops || !vma->vm_ops->fault;
}
1329
1330 int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1331                      unsigned long start, int len, int flags,
1332                 struct page **pages, struct vm_area_struct **vmas)
1333 {
1334         int i;
1335         unsigned int vm_flags = 0;
1336         int write = !!(flags & GUP_FLAGS_WRITE);
1337         int force = !!(flags & GUP_FLAGS_FORCE);
1338         int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
1339         int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
1340
1341         if (len <= 0)
1342                 return 0;
1343         /* 
1344          * Require read or write permissions.
1345          * If 'force' is set, we only require the "MAY" flags.
1346          */
1347         vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1348         vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1349         i = 0;
1350
1351         do {
1352                 struct vm_area_struct *vma;
1353                 unsigned int foll_flags;
1354
1355                 //vma = find_extend_vma(mm, start);
1356                 vma = find_vma(mm, start);
1357                 if (!vma && in_gate_area(tsk, start)) {
1358                         unsigned long pg = start & PAGE_MASK;
1359                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
1360                         pgd_t *pgd;
1361                         pud_t *pud;
1362                         pmd_t *pmd;
1363                         pte_t *pte;
1364
1365                         /* user gate pages are read-only */
1366                         if (!ignore && write)
1367                                 return i ? : -EFAULT;
1368                         if (pg > TASK_SIZE)
1369                                 pgd = pgd_offset_k(pg);
1370                         else
1371                                 pgd = pgd_offset_gate(mm, pg);
1372                         BUG_ON(pgd_none(*pgd));
1373                         pud = pud_offset(pgd, pg);
1374                         BUG_ON(pud_none(*pud));
1375                         pmd = pmd_offset(pud, pg);
1376                         if (pmd_none(*pmd))
1377                                 return i ? : -EFAULT;
1378                         pte = pte_offset_map(pmd, pg);
1379                         if (pte_none(*pte)) {
1380                                 pte_unmap(pte);
1381                                 return i ? : -EFAULT;
1382                         }
1383                         if (pages) {
1384                                 struct page *page = vm_normal_page(gate_vma, start, *pte);
1385                                 pages[i] = page;
1386                                 if (page)
1387                                         get_page(page);
1388                         }
1389                         pte_unmap(pte);
1390                         if (vmas)
1391                                 vmas[i] = gate_vma;
1392                         i++;
1393                         start += PAGE_SIZE;
1394                         len--;
1395                         continue;
1396                 }
1397
1398                 if (!vma ||
1399                     (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1400                     (!ignore && !(vm_flags & vma->vm_flags)))
1401                         return i ? : -EFAULT;
1402
1403                 if (is_vm_hugetlb_page(vma)) {
1404                         i = follow_hugetlb_page(mm, vma, pages, vmas,
1405                                                 &start, &len, i, write);
1406                         continue;
1407                 }
1408
1409                 foll_flags = FOLL_TOUCH;
1410                 if (pages)
1411                         foll_flags |= FOLL_GET;
1412
1413 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
1414                 if (!write && use_zero_page(vma))
1415                   foll_flags |= FOLL_ANON;
1416 #endif
1417
1418                 do {
1419                         struct page *page;
1420
1421                         /*
1422                          * If we have a pending SIGKILL, don't keep faulting
1423                          * pages and potentially allocating memory, unless
1424                          * current is handling munlock--e.g., on exit. In
1425                          * that case, we are not allocating memory.  Rather,
1426                          * we're only unlocking already resident/mapped pages.
1427                          */
1428                         if (unlikely(!ignore_sigkill &&
1429                                         fatal_signal_pending(current)))
1430                                 return i ? i : -ERESTARTSYS;
1431
1432                         if (write)
1433                                 foll_flags |= FOLL_WRITE;
1434
1435                         
1436                         //cond_resched();
1437
1438                         DBPRINTF ("pages = %p vma = %p\n", pages, vma);
1439                         while (!(page = follow_page(vma, start, foll_flags))) {
1440                                 int ret;
1441                                 ret = handle_mm_fault(mm, vma, start,
1442                                                 foll_flags & FOLL_WRITE);
1443                                 if (ret & VM_FAULT_ERROR) {
1444                                         if (ret & VM_FAULT_OOM)
1445                                                 return i ? i : -ENOMEM;
1446                                         else if (ret & VM_FAULT_SIGBUS)
1447                                                 return i ? i : -EFAULT;
1448                                         BUG();
1449                                 }
1450                                 if (ret & VM_FAULT_MAJOR)
1451                                         tsk->maj_flt++;
1452                                 else
1453                                         tsk->min_flt++;
1454
1455                                 /*
1456                                  * The VM_FAULT_WRITE bit tells us that
1457                                  * do_wp_page has broken COW when necessary,
1458                                  * even if maybe_mkwrite decided not to set
1459                                  * pte_write. We can thus safely do subsequent
1460                                  * page lookups as if they were reads. But only
1461                                  * do so when looping for pte_write is futile:
1462                                  * in some cases userspace may also be wanting
1463                                  * to write to the gotten user page, which a
1464                                  * read fault here might prevent (a readonly
1465                                  * page might get reCOWed by userspace write).
1466                                  */
1467                                 if ((ret & VM_FAULT_WRITE) &&
1468                                     !(vma->vm_flags & VM_WRITE))
1469                                         foll_flags &= ~FOLL_WRITE;
1470
1471                                 //cond_resched(); /* likewise left disabled */
1472                         }
1473                         if (IS_ERR(page))
1474                                 return i ? i : PTR_ERR(page);
1475                         if (pages) {
1476                                 pages[i] = page;
1477
1478                                 flush_anon_page(vma, page, start);
1479                                 flush_dcache_page(page);
1480                         }
1481                         if (vmas)
1482                                 vmas[i] = vma;
1483                         i++;
1484                         start += PAGE_SIZE;
1485                         len--;
1486                 } while (len && start < vma->vm_end);
1487         } while (len);
1488         return i;
1489 }
1490
1491 int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1492                 unsigned long start, int len, int write, int force,
1493                 struct page **pages, struct vm_area_struct **vmas)
1494 {
1495         int flags = 0;
1496
1497         if (write)
1498                 flags |= GUP_FLAGS_WRITE;
1499         if (force)
1500                 flags |= GUP_FLAGS_FORCE;
1501
1502         return __get_user_pages_uprobe(tsk, mm,
1503                                 start, len, flags,
1504                                 pages, vmas);
1505 }
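
/*
 * Editor's sketch (not part of the original module): a minimal caller of
 * get_user_pages_uprobe() that pins one user page read-only, copies a byte
 * out of it, and drops the FOLL_GET reference.  "peek_user_byte" is a
 * hypothetical helper; mm->mmap_sem is assumed to be held for reading, as
 * the gup path above requires.
 */
#if 0
static int peek_user_byte (struct task_struct *tsk, struct mm_struct *mm,
                           unsigned long addr, unsigned char *out)
{
        struct page *page;
        unsigned char *kaddr;
        int ret;

        ret = get_user_pages_uprobe (tsk, mm, addr & PAGE_MASK, 1,
                                     0 /* read */, 0 /* no force */,
                                     &page, NULL);
        if (ret <= 0)
                return ret ? ret : -EFAULT;

        kaddr = kmap_atomic (page, KM_USER0);
        *out = kaddr[addr & ~PAGE_MASK];
        kunmap_atomic (kaddr, KM_USER0);
        page_cache_release (page);      /* release the pinned page */
        return 0;
}
#endif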
1506
1507 int
1508 access_process_vm_atomic (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
1509 {
1510
1512         struct mm_struct *mm;
1513         struct vm_area_struct *vma;
1514         void *old_buf = buf;
1515
1516         mm = get_task_mm(tsk);
1517         if (!mm)
1518                 return 0;
1519
1520         down_read(&mm->mmap_sem);
1521         /* ignore errors, just check how much was successfully transferred */
1522         while (len) {
1523                 int bytes, ret, offset;
1524                 void *maddr;
1525                 struct page *page = NULL;
1526
1527                 ret = get_user_pages_uprobe(tsk, mm, addr, 1,
1528                                 write, 1, &page, &vma);
1529                 if (ret <= 0) {
1530                         /*
1531                          * Check if this is a VM_IO | VM_PFNMAP VMA, which
1532                          * we can access using slightly different code.
1533                          */
1534 #ifdef CONFIG_HAVE_IOREMAP_PROT
1535                         vma = find_vma(mm, addr);
1536                         if (!vma)
1537                                 break;
1538                         if (vma->vm_ops && vma->vm_ops->access)
1539                                 ret = vma->vm_ops->access(vma, addr, buf,
1540                                                           len, write);
1541                         if (ret <= 0)
1542 #endif
1543                                 break;
1544                         bytes = ret;
1545                 } else {
1546                         bytes = len;
1547                         offset = addr & (PAGE_SIZE-1);
1548                         if (bytes > PAGE_SIZE-offset)
1549                                 bytes = PAGE_SIZE-offset;
1550
1551                         maddr = kmap(page);
1552                         if (write) {
1553                                 copy_to_user_page(vma, page, addr,
1554                                                   maddr + offset, buf, bytes);
1555                                 set_page_dirty_lock(page);
1556                         } else {
1557                                 copy_from_user_page(vma, page, addr,
1558                                                     buf, maddr + offset, bytes);
1559                         }
1560                         kunmap(page);
1561                         page_cache_release(page);
1562                 }
1563                 len -= bytes;
1564                 buf += bytes;
1565                 addr += bytes;
1566         }
1567         up_read(&mm->mmap_sem);
1568         mmput(mm);
1569
1570         return buf - old_buf;
1571
1572 }
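
/*
 * Editor's sketch (not part of the original module): typical use of
 * access_process_vm_atomic() above, e.g. fetching one word from a traced
 * task.  "read_remote_long" is a hypothetical helper; the accessor returns
 * the number of bytes actually copied, so short reads must be checked.
 */
#if 0
static int read_remote_long (struct task_struct *task, unsigned long addr,
                             unsigned long *val)
{
        int copied = access_process_vm_atomic (task, addr, val,
                                               sizeof (*val), 0 /* read */);
        return (copied == sizeof (*val)) ? 0 : -EFAULT;
}
#endif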
1573
1574 #ifdef CONFIG_DEBUG_FS
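/*
 * kallsyms_lookup() is not exported to modules on all kernels, so the real
 * implementation is resolved at init time via kallsyms_search() (see
 * init_kprobes() below) and reached through this local wrapper.
 */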
1575 const char *(*__real_kallsyms_lookup) (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf);
1576 const char *
1577 kallsyms_lookup (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf)
1578 {
1579         return __real_kallsyms_lookup (addr, symbolsize, offset, modname, namebuf);
1580 }
1581
1582 static void __kprobes
1583 report_probe (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
1584 {
1585         const char *kprobe_type;
1586
1587         if (p->pre_handler == pre_handler_kretprobe)
1588                 kprobe_type = p->tgid ? "ur" : "r";
1589         else if (p->pre_handler == setjmp_pre_handler)
1590                 kprobe_type = p->tgid ? "uj" : "j";
1591         else if (p->tgid)
1592                 kprobe_type = "u";
1593         else
1594                 kprobe_type = "k";
1601         if (sym)
1602                 seq_printf (pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type, sym, offset, (modname ? modname : " "));
1603         else
1604                 seq_printf (pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
1605 }
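
/*
 * Sample "list" output (addresses and symbols illustrative only):
 *
 *   c0123456  k  do_fork+0x0
 *   c0654321  r  sys_open+0x0  some_module
 *
 * The second column is k/r/j for kernel kprobes/kretprobes/jprobes and
 * u/ur/uj for their user-space (p->tgid != 0) counterparts.
 */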
1606
1607 static void __kprobes *
1608 kprobe_seq_start (struct seq_file *f, loff_t * pos)
1609 {
1610         return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
1611 }
1612
1613 static void __kprobes *
1614 kprobe_seq_next (struct seq_file *f, void *v, loff_t * pos)
1615 {
1616         (*pos)++;
1617         if (*pos >= KPROBE_TABLE_SIZE)
1618                 return NULL;
1619         return pos;
1620 }
1621
1622 static void __kprobes
1623 kprobe_seq_stop (struct seq_file *f, void *v)
1624 {
1625         /* Nothing to do */
1626 }
1627
1628 struct us_proc_ip
1629 {
1630         char *name;
1631         int installed;
1632         struct jprobe jprobe;
1633         struct kretprobe retprobe;
1634         unsigned long offset;
1635 };
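
/*
 * One user-space instrumentation point: a jprobe on function entry paired
 * with a kretprobe on return; the commented-out lookup in show_kprobe_addr()
 * below recovers it via container_of().
 */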
1636
1637 static int __kprobes
1638 show_kprobe_addr (struct seq_file *pi, void *v)
1639 {
1640         struct hlist_head *head;
1641         struct hlist_node *node;
1642         struct kprobe *p, *kp;
1643         const char *sym = NULL;
1644         unsigned int i = *(loff_t *) v;
1645         unsigned long size, offset = 0;
1646         char *modname, namebuf[128];
1647
1648         head = &kprobe_table[i];
1649         preempt_disable ();
1650         hlist_for_each_entry_rcu (p, node, head, hlist)
1651         {
1652                 /*if (p->pid) {
1653                         struct us_proc_ip *up = NULL;
1654                         if (p->pre_handler == pre_handler_kretprobe) {
1655                                 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1656                                 up = container_of(rp, struct us_proc_ip, retprobe);
1657                         }
1658                         else { //if (p->pre_handler == setjmp_pre_handler){
1659                                 struct jprobe *jp = container_of(p, struct jprobe, kp);
1660                                 up = container_of(jp, struct us_proc_ip, jprobe);
1661                         }
1662                         if (up) {
1663                                 sym = up->name;
1664                                 printk("show_kprobe_addr: %s\n", sym);
1665                         }
1666                 }
1667                 else */
1668                 sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
1669                 if (p->pre_handler == aggr_pre_handler)
1670                 {
1671                         list_for_each_entry_rcu (kp, &p->list, list) report_probe (pi, kp, sym, offset, modname);
1672                 }
1673                 else
1674                         report_probe (pi, p, sym, offset, modname);
1675         }
1676         //seq_printf (pi, "handled exceptions %lu\n", handled_exceptions);
1677         preempt_enable ();
1678         return 0;
1679 }
1680
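/*
 * seq_file contract: ->start and ->next iterate the loff_t position over
 * the KPROBE_TABLE_SIZE hash-bucket indices, and ->show dumps every probe
 * in one bucket, so a read of the file walks the whole kprobe_table once.
 */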
1681 static struct seq_operations kprobes_seq_ops = {
1682         .start = kprobe_seq_start,
1683         .next = kprobe_seq_next,
1684         .stop = kprobe_seq_stop,
1685         .show = show_kprobe_addr
1686 };
1687
1688 static int __kprobes
1689 kprobes_open (struct inode *inode, struct file *filp)
1690 {
1691         return seq_open (filp, &kprobes_seq_ops);
1692 }
1693
1694 static struct file_operations debugfs_kprobes_operations = {
1695         .open = kprobes_open,
1696         .read = seq_read,
1697         .llseek = seq_lseek,
1698         .release = seq_release,
1699 };
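
/*
 * After debugfs_kprobe_init() runs, the table can be inspected with e.g.
 * `cat /sys/kernel/debug/kprobes/list` (the exact path depends on where
 * debugfs is mounted).
 */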
1700
1701 #ifdef KPROBES_PROFILE
1702 extern unsigned long nCount;
1703 extern struct timeval probe_enter_diff_sum;
1704 static void __kprobes *
1705 kprobe_prof_seq_start (struct seq_file *f, loff_t * pos)
1706 {
1707         return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
1708 }
1709
1710 static void __kprobes *
1711 kprobe_prof_seq_next (struct seq_file *f, void *v, loff_t * pos)
1712 {
1713         (*pos)++;
1714         if (*pos >= KPROBE_TABLE_SIZE)
1715                 return NULL;
1716         return pos;
1717 }
1718
1719 static void __kprobes
1720 kprobe_prof_seq_stop (struct seq_file *f, void *v)
1721 {
1722 }
1723
1724 static void __kprobes
1725 report_probe_prof (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
1726 {
1727         const char *kprobe_type;
1728
1729         if (p->pre_handler == pre_handler_kretprobe)
1730                 kprobe_type = p->pid ? "ur" : "r";
1731         else if (p->pre_handler == setjmp_pre_handler)
1732                 kprobe_type = p->pid ? "uj" : "j";
1733         else if (p->pid)
1734                 kprobe_type = "u";
1735         else
1736                 kprobe_type = "k";
1743
1744         if (sym)
1745                 seq_printf (pi, "%p  %s  %s+0x%x  %s %lu.%06ld\n", p->addr, kprobe_type,
1746                             sym, offset, (modname ? modname : " "), p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
1747         else
1748                 seq_printf (pi, "%p  %s  %p %lu.%06ld\n", p->addr, kprobe_type, p->addr,
1749                             p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
1750 }
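
/*
 * Same layout as report_probe() above, plus a trailing column with the
 * average handler time per hit (hnd_tm_sum / count), printed as
 * seconds.microseconds.
 */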
1751
1752 static int __kprobes
1753 show_kprobe_prof (struct seq_file *pi, void *v)
1754 {
1755         struct hlist_head *head;
1756         struct hlist_node *node;
1757         struct kprobe *p;       //, *kp;
1758         const char *sym = NULL;
1759         unsigned int i = *(loff_t *) v;
1760         unsigned long size, offset = 0;
1761         char *modname, namebuf[128];
1762         static struct timeval utv, ktv;
1763         static unsigned long ucount, kcount;
1764
1765         head = &kprobe_table[i];
1766         preempt_disable ();
1767         hlist_for_each_entry_rcu (p, node, head, hlist)
1768         {
1769                 sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
1770                 /*if (p->pre_handler == aggr_pre_handler) {
1771                         list_for_each_entry_rcu(kp, &p->list, list)
1772                                 report_probe_prof(pi, kp, sym, offset, modname);
1773                 } else */
1774                 report_probe_prof (pi, p, sym, offset, modname);
1775                 if (p->count)
1776                 {
1777                         if (p->pid)
1778                         {
1779                                 set_normalized_timeval (&utv, utv.tv_sec + p->hnd_tm_sum.tv_sec, utv.tv_usec + p->hnd_tm_sum.tv_usec);
1780                                 ucount += p->count;
1781                         }
1782                         else
1783                         {
1784                                 //seq_printf(pi, "kernel probe handling %lu %lu.%06ld\n", 
1785                                 //              p->count, p->hnd_tm_sum.tv_sec, p->hnd_tm_sum.tv_usec); 
1786                                 //seq_printf(pi, "kernel probe handling2 %lu %lu.%06ld\n", 
1787                                 //              kcount, ktv.tv_sec, ktv.tv_usec);       
1788                                 set_normalized_timeval (&ktv, ktv.tv_sec + p->hnd_tm_sum.tv_sec, ktv.tv_usec + p->hnd_tm_sum.tv_usec);
1789                                 kcount += p->count;
1790                                 //seq_printf(pi, "kernel probe handling3 %lu %lu.%06ld\n", 
1791                                 //              kcount, ktv.tv_sec, ktv.tv_usec);       
1792                         }
1793                 }
1794         }
1795         if (i == (KPROBE_TABLE_SIZE - 1))
1796         {
1797                 seq_printf (pi, "Average kernel probe handling %lu.%06ld\n", kcount ? ktv.tv_sec / kcount : 0, kcount ? ktv.tv_usec / kcount : 0);
1798                 seq_printf (pi, "Average user probe handling %lu.%06ld\n", ucount ? utv.tv_sec / ucount : 0, ucount ? utv.tv_usec / ucount : 0);
1799                 seq_printf (pi, "Average probe period %lu.%06ld\n", nCount ? probe_enter_diff_sum.tv_sec / nCount : 0, nCount ? probe_enter_diff_sum.tv_usec / nCount : 0);
1800                 utv.tv_sec = utv.tv_usec = ktv.tv_sec = ktv.tv_usec = 0;
1801                 ucount = kcount = 0;
1802         }
1803         preempt_enable ();
1804         return 0;
1805 }
1806
1807 static struct seq_operations kprobes_prof_seq_ops = {
1808         .start = kprobe_prof_seq_start,
1809         .next = kprobe_prof_seq_next,
1810         .stop = kprobe_prof_seq_stop,
1811         .show = show_kprobe_prof
1812 };
1813
1814 static int __kprobes
1815 kprobes_prof_open (struct inode *inode, struct file *filp)
1816 {
1817         return seq_open (filp, &kprobes_prof_seq_ops);
1818 }
1819
1820 static struct file_operations debugfs_kprobes_prof_operations = {
1821         .open = kprobes_prof_open,
1822         .read = seq_read,
1823         .llseek = seq_lseek,
1824         .release = seq_release,
1825 };
1826 #endif
1827
1828 int __kprobes debugfs_kprobe_init (void);
1829 static struct dentry *dbg_dir, *dbg_file;
1830 #ifdef KPROBES_PROFILE
1831 static struct dentry *dbg_file_prof;
1832 #endif
1833
1834 int __kprobes
1835 debugfs_kprobe_init (void)
1836 {
1837         //struct dentry *dir, *file;
1838
1839         dbg_dir = debugfs_create_dir ("kprobes", NULL);
1840         if (!dbg_dir)
1841                 return -ENOMEM;
1842
1843         dbg_file = debugfs_create_file ("list", 0444, dbg_dir, NULL, &debugfs_kprobes_operations);
1844         if (!dbg_file)
1845         {
1846                 debugfs_remove (dbg_dir);
1847                 dbg_dir = NULL;
1848                 return -ENOMEM;
1849         }
1850
1851 #ifdef KPROBES_PROFILE
1852         dbg_file_prof = debugfs_create_file ("prof", 0444, dbg_dir, NULL, &debugfs_kprobes_prof_operations);
1853         if (!dbg_file_prof)
1854         {
1855                 debugfs_remove (dbg_file);
1856                 debugfs_remove (dbg_dir);
1857                 dbg_dir = NULL;
1858                 return -ENOMEM;
1859         }
1860 #endif
1861         return 0;
1862 }
1863
1864 //late_initcall(debugfs_kprobe_init);
1865 extern unsigned long (*kallsyms_search) (const char *name);
1866 #endif /* CONFIG_DEBUG_FS */
1867
1868 #if defined(CONFIG_X86)
1869 static struct notifier_block kprobe_exceptions_nb = {
1870         .notifier_call = kprobe_exceptions_notify,
1871         .priority = INT_MAX
1872 };
1873 #endif
1874
1875 static int __init
1876 init_kprobes (void)
1877 {
1878         int i, err = 0;
1879
1880         /* FIXME allocate the probe table, currently defined statically */
1881         /* initialize all list heads */
1882         for (i = 0; i < KPROBE_TABLE_SIZE; i++)
1883         {
1884                 INIT_HLIST_HEAD (&kprobe_table[i]);
1885                 INIT_HLIST_HEAD (&kretprobe_inst_table[i]);
1886                 INIT_HLIST_HEAD (&uprobe_insn_slot_table[i]);
1887         }
1888         atomic_set (&kprobe_count, 0);
1889
1890         err = arch_init_kprobes ();
1891
1892         DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);
1893 #if defined(CONFIG_X86)
1894         if (!err)
1895                 err = register_die_notifier (&kprobe_exceptions_nb);
1896         DBPRINTF ("init_kprobes: register_die_notifier - %d", err);
1897 #endif /* CONFIG_X86 */
1898
1899 #ifdef CONFIG_DEBUG_FS
1900         if (!err)
1901         {
1902                 __real_kallsyms_lookup = (void *) kallsyms_search ("kallsyms_lookup");
1903                 if (!__real_kallsyms_lookup)
1904                 {
1905                         DBPRINTF ("kallsyms_lookup is not found! Oops. Where is the kernel?");
1906                         return -ESRCH;
1907                 }
1908                 err = debugfs_kprobe_init ();
1909                 DBPRINTF ("init_kprobes: debugfs_kprobe_init - %d", err);
1910         }
1911 #endif /* CONFIG_DEBUG_FS */
1912
1913         return err;
1914 }
1915
1916 static void __exit
1917 exit_kprobes (void)
1918 {
1919 #ifdef CONFIG_DEBUG_FS
1920 #ifdef KPROBES_PROFILE
1921         if (dbg_file_prof)
1922                 debugfs_remove (dbg_file_prof);
1923 #endif
1924         if (dbg_file)
1925                 debugfs_remove (dbg_file);
1926         if (dbg_dir)
1927                 debugfs_remove (dbg_dir);
1928 #endif /* CONFIG_DEBUG_FS */
1929
1930 #if defined(CONFIG_X86)
1931         unregister_die_notifier (&kprobe_exceptions_nb);
1932 #endif /* CONFIG_X86 */
1933         arch_exit_kprobes ();
1934 }
1935
1936 module_init (init_kprobes);
1937 module_exit (exit_kprobes);
1938
1939 EXPORT_SYMBOL_GPL (register_kprobe);
1940 EXPORT_SYMBOL_GPL (unregister_kprobe);
1941 EXPORT_SYMBOL_GPL (register_jprobe);
1942 EXPORT_SYMBOL_GPL (unregister_jprobe);
1943 EXPORT_SYMBOL_GPL (register_ujprobe);
1944 EXPORT_SYMBOL_GPL (unregister_ujprobe);
1945 EXPORT_SYMBOL_GPL (jprobe_return);
1946 EXPORT_SYMBOL_GPL (uprobe_return);
1947 EXPORT_SYMBOL_GPL (register_kretprobe);
1948 EXPORT_SYMBOL_GPL (unregister_kretprobe);
1949 EXPORT_SYMBOL_GPL (register_uretprobe);
1950 EXPORT_SYMBOL_GPL (unregister_uretprobe);
1951 EXPORT_SYMBOL_GPL (unregister_all_uprobes);
1952 EXPORT_SYMBOL_GPL (access_process_vm_atomic);
1953 #if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,23)
1954 EXPORT_SYMBOL_GPL (access_process_vm);
1955 #endif
1956 #ifdef KERNEL_HAS_ISPAGEPRESENT
1957 EXPORT_SYMBOL_GPL (is_page_present);
1958 #else
1959 EXPORT_SYMBOL_GPL (page_present);
1960 #endif
1961
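
/*
 * Editor's sketch (not part of the original module): a minimal client of
 * the exported API above, placing a kprobe by address.  Field names follow
 * the mainline kprobes API, and register_kprobe()/unregister_kprobe() are
 * assumed to keep their mainline signatures in this tree; kallsyms_search()
 * is the symbol resolver this file already relies on.
 */
#if 0
static int client_pre (struct kprobe *p, struct pt_regs *regs)
{
        printk (KERN_INFO "probe hit at %p\n", p->addr);
        return 0;       /* continue normal execution */
}

static struct kprobe client_probe = {
        .pre_handler = client_pre,
};

static int __init client_init (void)
{
        client_probe.addr = (kprobe_opcode_t *) kallsyms_search ("do_fork");
        if (!client_probe.addr)
                return -ENOENT;
        return register_kprobe (&client_probe);
}

static void __exit client_exit (void)
{
        unregister_kprobe (&client_probe);
}
#endif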