1 // src_kprobes.c
2
3
4 #include <linux/version.h>
5 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
6 #include <linux/config.h>
7 #endif
8
9 #include <asm/types.h>
10
11 #include <linux/hash.h>
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/moduleloader.h>
16 #include <linux/kallsyms.h>
17 //#include <linux/freezer.h>
18 #include <linux/seq_file.h>
19 #ifdef CONFIG_DEBUG_FS
20 #include <linux/debugfs.h>
21 #endif
22 #include <asm-generic/sections.h>
23 #include <asm/cacheflush.h>
24 #include <asm/errno.h>
25 #include <linux/spinlock.h>
26 #include <linux/version.h>
27 #include <linux/highmem.h>      // kmap_atomic, kunmap_atomic, copy_from_user_page, copy_to_user_page
28 #include <linux/pagemap.h>      // page_cache_release
29 #include <linux/vmalloc.h>      // vmalloc, vfree
30 #if defined(CONFIG_X86)
31 #include <linux/kdebug.h>       // register_die_notifier, unregister_die_notifier
32 #endif
33 #include <linux/hugetlb.h>      // follow_hugetlb_page, is_vm_hugetlb_page
34
35 #include "kprobes.h"
36
37 //#define arch_remove_kprobe(p) do { } while (0)
38
39 #ifdef _DEBUG
40 extern int nCount;
41 #endif
42
43 /*
44 static spinlock_t die_notifier_lock = SPIN_LOCK_UNLOCKED;
45
46 int src_register_die_notifier(struct notifier_block *nb)
47 {
48         int err = 0;
49         unsigned long flags;
50
51         spin_lock_irqsave(&die_notifier_lock, flags);
52 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
53         err = atomic_notifier_chain_register(&panic_notifier_list, nb);
54 #else
55         err = notifier_chain_register(&panic_notifier_list, nb);
56 #endif
57         spin_unlock_irqrestore(&die_notifier_lock, flags);
58
59         return err;
60 }
61 */
62
63 int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
64                           unsigned long start, int len, int write, int force,
65                           struct page **pages, struct vm_area_struct **vmas);
66 /**
67  * hlist_replace_rcu - replace old entry by new one
68  * @old: the element to be replaced
69  * @new: the new element to insert
70  *
71  * The @old entry will be replaced with the @new entry atomically.
72  */
73 static inline void
74 src_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new)
75 {
76         struct hlist_node *next = old->next;
77
78         new->next = next;
79         new->pprev = old->pprev;
80         smp_wmb ();
81         if (next)
82                 new->next->pprev = &new->next;
83         if (new->pprev)
84                 *new->pprev = new;
85         old->pprev = LIST_POISON2;
86 }
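/*
 * Example (illustrative sketch only; the helper name is hypothetical):
 * writers must serialize among themselves, and concurrent RCU readers
 * then see either the old node or the new one, never a torn list.
 */
#if 0
static void example_swap_probe (struct kprobe *old_kp, struct kprobe *new_kp)
{
        /* the same pattern add_aggr_kprobe() uses below */
        src_hlist_replace_rcu (&old_kp->hlist, &new_kp->hlist);
}
#endif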
87
88 #define KPROBE_HASH_BITS 6
89 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
90
91
92 /*
93  * Some oddball architectures like 64bit powerpc have function descriptors
94  * so this must be overridable.
95  */
96 #ifndef kprobe_lookup_name
97 #define kprobe_lookup_name(name, addr) \
98         addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
99 #endif
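/*
 * Example (sketch): resolving a probe address from a symbol name, as
 * __register_kprobe() does below; the symbol here is illustrative only.
 */
#if 0
static kprobe_opcode_t *example_lookup_addr (void)
{
        kprobe_opcode_t *addr = NULL;
        kprobe_lookup_name ("do_exit", addr);   /* the macro assigns to addr */
        return addr;
}
#endif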
100
101 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
102 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
103 static struct hlist_head uprobe_insn_slot_table[KPROBE_TABLE_SIZE];
104 static atomic_t kprobe_count;
105
106 //DEFINE_MUTEX(kprobe_mutex);           /* Protects kprobe_table */
107 DEFINE_SPINLOCK (kretprobe_lock);       /* Protects kretprobe_inst_table */
108 static DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL;
109 unsigned long handled_exceptions;
110
111 /* We have preemption disabled, so it is safe to use the __ versions */
112 static inline void
113 set_kprobe_instance (struct kprobe *kp)
114 {
115         __get_cpu_var (kprobe_instance) = kp;
116 }
117
118 static inline void
119 reset_kprobe_instance (void)
120 {
121         __get_cpu_var (kprobe_instance) = NULL;
122 }
123
124 /*
125  * This routine is called either:
126  *      - under the kprobe_mutex - during kprobe_[un]register()
127  *                              OR
128  *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
129  */
130 struct kprobe __kprobes *
131 get_kprobe (void *addr, int tgid, struct task_struct *ctask)
132 {
133         struct hlist_head *head;
134         struct hlist_node *node;
135         struct kprobe *p, *retVal = NULL;
136         int ret = 0, uprobe_found;
137         struct page *page = NULL, *tpage = NULL;
138         struct vm_area_struct *vma = NULL;
139         struct task_struct *task = NULL;
140         void *paddr = NULL;
141
142
143         if (ctask && ctask->active_mm)
144         {
145                 ret = get_user_pages_uprobe (ctask, ctask->active_mm, (unsigned long) addr, 1, 0, 0, &tpage, NULL);
146                 if (ret <= 0)
147                         DBPRINTF ("get_user_pages for task %d at %p failed!", current->pid, addr);
148                 else
149                 {
150                         paddr = page_address (tpage);
151                         page_cache_release (tpage);
152                 }
153         }
154         //else
155         //      DBPRINTF("task %d has no mm!", ctask->pid);
156
157         //TODO: test - two processes invoke the instrumented function
158         head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
159         hlist_for_each_entry_rcu (p, node, head, hlist)
160         {
161                 // match if we are looking for a kernel probe and this is a kernel probe with the same addr, OR
162                 // if we are looking for a user space probe and this is a user space probe with the same addr and pid
163                 DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->addr, tgid, p->tgid);
164                 if (p->addr == addr)
165                 {
166                         uprobe_found = 0;
167                         if (tgid == p->tgid)
168                                 uprobe_found = 1;
169                         if (!tgid || uprobe_found)
170                         {
171                                 retVal = p;
172                                 if (tgid)
173                                         DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
174                                 else
175                                         DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
176                                 break;
177                         }
178                 }
179                 else if (tgid != p->tgid)
180                 {
181                         // if we are looking for a user space probe and this is a user space probe
182                         // with another addr and pid but with the same offset within the page,
183                         // it could be the same probe (mapped at a different address in another
184                         // process); handle it as a usual probe, but without notifying user space
185                         if (paddr && tgid && (((unsigned long) addr & ~PAGE_MASK) == ((unsigned long) p->addr & ~PAGE_MASK))
186                             && p->tgid)
187                         {
188                                 DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d. possibly for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
189                                 // this probe has the same offset in the page
190                                 // look in the probes for the other pids                                
191                                 // get page for user space probe addr
192                                 rcu_read_lock ();
193 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
194                                 task = find_task_by_pid (p->tgid);
195 #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) */
196                                 task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
197 #endif
198                                 if (task)
199                                         get_task_struct (task);
200                                 rcu_read_unlock ();
201                                 if (!task)
202                                 {
203                                         DBPRINTF ("task for pid %d not found! Dead probe?", p->tgid);
204                                         continue;
205                                 }
206                                 if (task->active_mm)
207                                 {
208                                         if (page_present (task->active_mm, (unsigned long) p->addr))
209                                         {
210                                                 ret = get_user_pages_uprobe (task, task->active_mm, (unsigned long) p->addr, 1, 0, 0, &page, &vma);
211                                                 if (ret <= 0)
212                                                         DBPRINTF ("get_user_pages for task %d at %p failed!", p->tgid, p->addr);
213                                         }
214                                         else
215                                                 ret = -1;
216                                 }
217                                 else
218                                 {
219                                         DBPRINTF ("task %d has no mm!", task->pid);
220                                         ret = -1;
221                                 }
222                                 put_task_struct (task);
223                                 if (ret <= 0)
224                                         continue;
225                                 if (paddr == page_address (page))
226                                 {
227                                         retVal = p;     // we found the probe in other process address space
228                                         DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
229                                         panic ("user space probe from another process");
230                                 }
231                                 page_cache_release (page);
232                                 if (retVal)
233                                         break;
234                         }
235                 }
236         }
237
238         DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
239         return retVal;
240 }
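/*
 * In short: a kernel probe matches on the address alone (tgid == 0); a user
 * space probe must also match the tgid or, failing that, resolve to the same
 * physical page at the same page offset in another traced process.
 */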
241
242 struct kprobe __kprobes *
243 get_kprobe_by_insn_slot (void *addr, int tgid, struct task_struct *ctask)
244 {
245         struct hlist_head *head;
246         struct hlist_node *node;
247         struct kprobe *p, *retVal = NULL;
248         int uprobe_found;
249
250         //TODO: test - two processes invoke the instrumented function
251         head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
252         hlist_for_each_entry_rcu (p, node, head, is_hlist)
253         {
254                 // match if we are looking for a kernel probe and this is a kernel probe with the same addr, OR
255                 // if we are looking for a user space probe and this is a user space probe with the same addr and pid
256                 DBPRINTF ("get_kprobe_by_insn_slot[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->ainsn.insn, tgid, p->tgid);
257                 if (p->ainsn.insn == addr)
258                 {
259                         uprobe_found = 0;
260                         if (tgid == p->tgid)
261                                 uprobe_found = 1;
262                         if (!tgid || uprobe_found)
263                         {
264                                 retVal = p;
265                                 if (tgid)
266                                         DBPRINTF ("get_kprobe_by_insn_slot[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
267                                 else
268                                         DBPRINTF ("get_kprobe_by_insn_slot[%d]: found kernel probe at %p", nCount, p->addr);
269                                 break;
270                         }
271                 }
272         }
273
274         DBPRINTF ("get_kprobe_by_insn_slot[%d]: probe %p", nCount, retVal);
275         return retVal;
276 }
277
278 /*
279  * Aggregate handlers for multiple kprobes support - these handlers
280  * take care of invoking the individual kprobe handlers on p->list
281  */
282 static int __kprobes
283 aggr_pre_handler (struct kprobe *p, struct pt_regs *regs        /*, 
284                                                                    struct vm_area_struct **vma, 
285                                                                    struct page **page, unsigned long **kaddr */ )
286 {
287         struct kprobe *kp;
288         int ret;
289
290         list_for_each_entry_rcu (kp, &p->list, list)
291         {
292                 if (kp->pre_handler)
293                 {
294                         set_kprobe_instance (kp);
295                         ret = kp->pre_handler (kp, regs);
296                         if (ret)
297                                 return ret;
298                 }
299                 reset_kprobe_instance ();
300         }
301         return 0;
302 }
303
304 static void __kprobes
305 aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags)
306 {
307         struct kprobe *kp;
308
309         list_for_each_entry_rcu (kp, &p->list, list)
310         {
311                 if (kp->post_handler)
312                 {
313                         set_kprobe_instance (kp);
314                         kp->post_handler (kp, regs, flags);
315                         reset_kprobe_instance ();
316                 }
317         }
318         return;
319 }
320
321 #if 1
322 static int __kprobes
323 aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr)
324 {
325         struct kprobe *cur = __get_cpu_var (kprobe_instance);
326
327         /*
328          * if we faulted "during" the execution of a user specified
329          * probe handler, invoke just that probe's fault handler
330          */
331         if (cur && cur->fault_handler)
332         {
333                 if (cur->fault_handler (cur, regs, trapnr))
334                         return 1;
335         }
336         return 0;
337 }
338 #endif
339
340 static int __kprobes
341 aggr_break_handler (struct kprobe *p, struct pt_regs *regs      /*, 
342                                                                    struct vm_area_struct **vma, 
343                                                                    struct page **page, unsigned long **kaddr */ )
344 {
345         struct kprobe *cur = __get_cpu_var (kprobe_instance);
346         int ret = 0;
347         DBPRINTF ("cur = 0x%p\n", cur);
348         if (cur)
349                 DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);
350
351         if (cur && cur->break_handler)
352         {
353                 if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
354                         ret = 1;
355         }
356         reset_kprobe_instance ();
357         return ret;
358 }
359
360 /* Walks the list and increments nmissed count for multiprobe case */
361 void __kprobes
362 kprobes_inc_nmissed_count (struct kprobe *p)
363 {
364         struct kprobe *kp;
365         if (p->pre_handler != aggr_pre_handler)
366         {
367                 p->nmissed++;
368         }
369         else
370         {
371                 list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++;
372         }
373         return;
374 }
375
376 /* Called with kretprobe_lock held */
377 struct kretprobe_instance __kprobes *
378 get_free_rp_inst (struct kretprobe *rp)
379 {
380         struct hlist_node *node;
381         struct kretprobe_instance *ri;
382         hlist_for_each_entry (ri, node, &rp->free_instances, uflist) 
383                 return ri;
384         return NULL;
385 }
386
387 /* Called with kretprobe_lock held */
388 static struct kretprobe_instance __kprobes *
389 get_used_rp_inst (struct kretprobe *rp)
390 {
391         struct hlist_node *node;
392         struct kretprobe_instance *ri;
393         hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
394         return NULL;
395 }
396
397 /* Called with kretprobe_lock held */
398 void __kprobes
399 add_rp_inst (struct kretprobe_instance *ri)
400 {
401         /*
402          * Remove rp inst off the free list -
403          * Add it back when probed function returns
404          */
405         hlist_del (&ri->uflist);
406
407         /* Add rp inst onto table */
408         INIT_HLIST_NODE (&ri->hlist);
409         hlist_add_head (&ri->hlist, &kretprobe_inst_table[hash_ptr (ri->task, KPROBE_HASH_BITS)]);
410
411         /* Also add this rp inst to the used list. */
412         INIT_HLIST_NODE (&ri->uflist);
413         hlist_add_head (&ri->uflist, &ri->rp->used_instances);
414 }
415
416 /* Called with kretprobe_lock held */
417 void __kprobes
418 recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
419 {
420         /* remove rp inst off the rprobe_inst_table */
421         hlist_del (&ri->hlist);
422         if (ri->rp)
423         {
424                 /* remove rp inst off the used list */
425                 hlist_del (&ri->uflist);
426                 /* put rp inst back onto the free list */
427                 INIT_HLIST_NODE (&ri->uflist);
428                 hlist_add_head (&ri->uflist, &ri->rp->free_instances);
429         }
430         else
431                 /* Unregistering */
432                 hlist_add_head (&ri->hlist, head);
433 }
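/*
 * Instance lifecycle: on function entry an instance moves from
 * rp->free_instances onto rp->used_instances and into kretprobe_inst_table
 * (add_rp_inst above); on function return recycle_rp_inst() puts it back on
 * the free list, or parks it on the caller-supplied head when the kretprobe
 * is being unregistered (ri->rp == NULL).
 */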
434
435 struct hlist_head __kprobes *
436 kretprobe_inst_table_head (struct task_struct *tsk)
437 {
438         return &kretprobe_inst_table[hash_ptr (tsk, KPROBE_HASH_BITS)];
439 }
440
441 /*
442  * This function is called from finish_task_switch when task tk becomes dead,
443  * so that we can recycle any function-return probe instances associated
444  * with this task. These left over instances represent probed functions
445  * that have been called but will never return.
446  */
447 /*void __kprobes kprobe_flush_task(struct task_struct *tk)
448 {
449         struct kretprobe_instance *ri;
450         struct hlist_head *head, empty_rp;
451         struct hlist_node *node, *tmp;
452         unsigned long flags = 0;
453
454         INIT_HLIST_HEAD(&empty_rp);
455         spin_lock_irqsave(&kretprobe_lock, flags);
456         head = kretprobe_inst_table_head(tk);
457         hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
458                 if (ri->task == tk)
459                         recycle_rp_inst(ri, &empty_rp);
460         }
461         spin_unlock_irqrestore(&kretprobe_lock, flags);
462
463         hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
464                 hlist_del(&ri->hlist);
465                 kfree(ri);
466         }
467 }*/
468
469 static inline void
470 free_rp_inst (struct kretprobe *rp)
471 {
472         struct kretprobe_instance *ri;
473         while ((ri = get_free_rp_inst (rp)) != NULL)
474         {
475                 hlist_del (&ri->uflist);
476                 kfree (ri);
477         }
478 }
479
480 /*
481  * Keep all fields in the kprobe consistent
482  */
483 static inline void
484 copy_kprobe (struct kprobe *old_p, struct kprobe *p)
485 {
486         memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t));
487         memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn));
488         p->tgid = old_p->tgid;
489         p->ss_addr = old_p->ss_addr;
490         //p->spid = old_p->spid;
491 }
492
493 /*
494  * Add the new probe to old_p->list. Fail if this is the
495  * second jprobe at the address - two jprobes can't coexist.
496  */
497 static int __kprobes
498 add_new_kprobe (struct kprobe *old_p, struct kprobe *p)
499 {
500         if (p->break_handler)
501         {
502                 if (old_p->break_handler)
503                         return -EEXIST;
504                 list_add_tail_rcu (&p->list, &old_p->list);
505                 old_p->break_handler = aggr_break_handler;
506         }
507         else
508                 list_add_rcu (&p->list, &old_p->list);
509         if (p->post_handler && !old_p->post_handler)
510                 old_p->post_handler = aggr_post_handler;
511         return 0;
512 }
513
514 /*
515  * Fill in the required fields of the "manager kprobe". Replace the
516  * earlier kprobe in the hlist with the manager kprobe
517  */
518 static inline void
519 add_aggr_kprobe (struct kprobe *ap, struct kprobe *p)
520 {
521         copy_kprobe (p, ap);
522         flush_insn_slot (ap);
523         ap->addr = p->addr;
524         ap->pre_handler = aggr_pre_handler;
525         ap->fault_handler = aggr_fault_handler;
526         if (p->post_handler)
527                 ap->post_handler = aggr_post_handler;
528         if (p->break_handler)
529                 ap->break_handler = aggr_break_handler;
530
531         INIT_LIST_HEAD (&ap->list);
532         list_add_rcu (&p->list, &ap->list);
533
534         src_hlist_replace_rcu (&p->hlist, &ap->hlist);
535 }
536
537 /*
538  * This is the second or subsequent kprobe at the address - handle
539  * the intricacies
540  */
541 static int __kprobes
542 register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p)
543 {
544         int ret = 0;
545         struct kprobe *ap;
546         DBPRINTF ("start\n");
547
548         DBPRINTF ("p = %p old_p = %p \n", p, old_p);
549         if (old_p->pre_handler == aggr_pre_handler)
550         {
551                 DBPRINTF ("aggr_pre_handler \n");
552
553                 copy_kprobe (old_p, p);
554                 ret = add_new_kprobe (old_p, p);
555         }
556         else
557         {
558                 DBPRINTF ("kzalloc\n");
559
560 #ifdef kzalloc
561                 ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL);
562 #else
563                 ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL);
564                 if (ap)
565                         memset (ap, 0, sizeof (struct kprobe));
566 #endif
567                 if (!ap)
568                         return -ENOMEM;
569                 add_aggr_kprobe (ap, old_p);
570                 copy_kprobe (ap, p);
571                 DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
572                 ret = add_new_kprobe (ap, p);
573         }
574         return ret;
575 }
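/*
 * In short: the first kprobe at an address sits directly on the hash list;
 * once a second one is registered, a "manager" kprobe carrying the aggr_*
 * handlers takes its place and every real probe hangs off the manager's
 * ->list, so a single breakpoint serves them all.
 */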
576
577 static int __kprobes
578 __register_kprobe (struct kprobe *p, unsigned long called_from, int atomic)
579 {
580         struct kprobe *old_p;
581 //      struct module *probed_mod;
582         int ret = 0;
583         /*
584          * If we have a symbol_name argument look it up,
585          * and add it to the address.  That way the addr
586          * field can either be global or relative to a symbol.
587          */
588         if (p->symbol_name)
589         {
590                 if (p->addr)
591                         return -EINVAL;
592                 kprobe_lookup_name (p->symbol_name, p->addr);
593         }
594
595         if (!p->addr)
596                 return -EINVAL;
597         DBPRINTF ("p->addr = 0x%p\n", p->addr);
598         p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset);
599         DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
600
601 /*      if ((!kernel_text_address((unsigned long) p->addr)) ||
602                 in_kprobes_functions((unsigned long) p->addr))
603                 return -EINVAL;*/
604
605 #ifdef KPROBES_PROFILE
606         p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
607         p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
608         p->count = 0;
609 #endif
610         p->mod_refcounted = 0;
611         //p->proc_prio = 0;
612         //p->proc_sched = 0;    
613         //p->spid = -1;
614         //p->irq = 0;
615         //p->task_flags = 0;
616 /*
617         // Check whether we are probing a module
618         if ((probed_mod = module_text_address((unsigned long) p->addr))) {
619                 struct module *calling_mod = module_text_address(called_from);
620                 // We must allow modules to probe themselves and
621                 // in that case avoid incrementing the module refcount,
622                 // so as to allow unloading of self-probing modules.
623                 //
624                 if (calling_mod && (calling_mod != probed_mod)) {
625                         if (unlikely(!try_module_get(probed_mod)))
626                                 return -EINVAL;
627                         p->mod_refcounted = 1;
628                 } else
629                         probed_mod = NULL;
630         }
631 */
632         p->nmissed = 0;
633 //      mutex_lock(&kprobe_mutex);
634         old_p = get_kprobe (p->addr, 0, NULL);
635         if (old_p)
636         {
637                 ret = register_aggr_kprobe (old_p, p);
638                 if (!ret)
639                         atomic_inc (&kprobe_count);
640                 goto out;
641         }
642
643         if ((ret = arch_prepare_kprobe (p)) != 0)
644                 goto out;
645
646         DBPRINTF ("before out ret = 0x%x\n", ret);
647
648         INIT_HLIST_NODE (&p->hlist);
649         hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
650
651 /*      if (atomic_add_return(1, &kprobe_count) == \
652                                 (ARCH_INACTIVE_KPROBE_COUNT + 1))
653                 register_page_fault_notifier(&kprobe_page_fault_nb);*/
654
655         arch_arm_kprobe (p);
656
657       out:
658 //      mutex_unlock(&kprobe_mutex);
659 /*
660         if (ret && probed_mod)
661                 module_put(probed_mod);
662 */
663         DBPRINTF ("out ret = 0x%x\n", ret);
664
665         return ret;
666 }
667
668 static int __kprobes
669 __register_uprobe (struct kprobe *p, struct task_struct *task, int atomic, unsigned long called_from)
670 {
671         int ret = 0;
672         struct kprobe *old_p;
673
674         if (!p->addr)
675                 return -EINVAL;
676
677         DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);
678
679         p->mod_refcounted = 0;
680         p->nmissed = 0;
681 #ifdef KPROBES_PROFILE
682         p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
683         p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
684         p->count = 0;
685 #endif
686
687         // get the first item
688         old_p = get_kprobe (p->addr, p->tgid, NULL);
689         if (old_p)
690         {
691                 ret = register_aggr_kprobe (old_p, p);
692                 if (!ret)
693                         atomic_inc (&kprobe_count);
694                 goto out;
695         }
696         if ((ret = arch_prepare_uprobe (p, task, atomic)) != 0)
697         {
698                 goto out;
699         }
700
701         DBPRINTF ("before out ret = 0x%x\n", ret);
702
703         INIT_HLIST_NODE (&p->hlist);
704         hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);
705
706         INIT_HLIST_NODE (&p->is_hlist);
707         hlist_add_head_rcu (&p->is_hlist, &uprobe_insn_slot_table[hash_ptr (p->ainsn.insn, KPROBE_HASH_BITS)]);
708
709         arch_arm_uprobe (p, task);
710 out:
711         DBPRINTF ("out ret = 0x%x\n", ret);
712
713         return ret;
714 }
715
716 void __kprobes
717 unregister_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
718 {
719         unregister_kprobe (p, task, atomic);
720 }
721
722 int __kprobes
723 register_kprobe (struct kprobe *p, int atomic)
724 {
725         return __register_kprobe (p, (unsigned long) __builtin_return_address (0), atomic);
726 }
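/*
 * Example (minimal sketch of this module's two-argument API; the probed
 * symbol and the handler are hypothetical):
 */
#if 0
static int example_pre (struct kprobe *p, struct pt_regs *regs)
{
        DBPRINTF ("hit probe at %p", p->addr);
        return 0;       /* 0: let the original instruction execute */
}

static struct kprobe example_kp = {
        .symbol_name = "do_exit",
        .pre_handler = example_pre,
};

/* from non-atomic context: register_kprobe (&example_kp, 0); */
#endif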
727
728 void __kprobes
729 unregister_kprobe (struct kprobe *p, struct task_struct *task, int atomic)
730 {
731 //      struct module *mod;
732         struct kprobe *old_p, *list_p;
733         int cleanup_p, pid = 0;
734
735 //      mutex_lock(&kprobe_mutex);
736
737         pid = p->tgid;
738
739         old_p = get_kprobe (p->addr, pid, NULL);
740         DBPRINTF ("unregister_kprobe p=%p old_p=%p", p, old_p);
741         if (unlikely (!old_p))
742         {
743 //              mutex_unlock(&kprobe_mutex);
744                 return;
745         }
746         if (p != old_p)
747         {
748                 list_for_each_entry_rcu (list_p, &old_p->list, list) 
749                         if (list_p == p)
750                                 /* kprobe p is a valid probe */
751                                 goto valid_p;
752 //              mutex_unlock(&kprobe_mutex);
753                 return;
754         }
755 valid_p:
756         DBPRINTF ("unregister_kprobe valid_p");
757         if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) && 
758                 (p->list.next == &old_p->list) && (p->list.prev == &old_p->list)))
759         {
760                 /* Only probe on the hash list */
761                 DBPRINTF ("unregister_kprobe disarm pid=%d", pid);
762                 if (pid)
763                         arch_disarm_uprobe (p, task);//vma, page, kaddr);
764                 else
765                         arch_disarm_kprobe (p);
766                 hlist_del_rcu (&old_p->hlist);
767                 cleanup_p = 1;
768         }
769         else
770         {
771                 list_del_rcu (&p->list);
772                 cleanup_p = 0;
773         }
774         DBPRINTF ("unregister_kprobe cleanup_p=%d", cleanup_p);
775 //      mutex_unlock(&kprobe_mutex);
776
777 //      synchronize_sched();
778 /*
779         if (p->mod_refcounted &&
780             (mod = module_text_address((unsigned long)p->addr)))
781                 module_put(mod);
782 */
783         if (cleanup_p)
784         {
785                 if (p != old_p)
786                 {
787                         list_del_rcu (&p->list);
788                         kfree (old_p);
789                 }
790                 arch_remove_kprobe (p, task);
791         }
792         else
793         {
794 ///             mutex_lock(&kprobe_mutex);
795                 if (p->break_handler)
796                         old_p->break_handler = NULL;
797                 if (p->post_handler)
798                 {
799                         list_for_each_entry_rcu (list_p, &old_p->list, list)
800                         {
801                                 if (list_p->post_handler)
802                                 {
803                                         cleanup_p = 2;
804                                         break;
805                                 }
806                         }
807                         if (cleanup_p == 0)
808                                 old_p->post_handler = NULL;
809                 }
810 //              mutex_unlock(&kprobe_mutex);
811         }
812
813         /* Call unregister_page_fault_notifier()
814          * if no probes are active
815          */
816 //      mutex_lock(&kprobe_mutex);
817 /*      if (atomic_add_return(-1, &kprobe_count) == \
818                                 ARCH_INACTIVE_KPROBE_COUNT)
819                 unregister_page_fault_notifier(&kprobe_page_fault_nb);*/
820 //      mutex_unlock(&kprobe_mutex);
821         return;
822 }
823
824 int __kprobes
825 register_ujprobe (struct task_struct *task, struct mm_struct *mm, struct jprobe *jp, int atomic)
826 {
827         int ret = 0;
828 #ifdef _DEBUG
829         gSilent = 0;
830 #endif
831         /* TODO: Verify probepoint is a function entry point */
832         jp->kp.pre_handler = setjmp_pre_handler;
833         jp->kp.break_handler = longjmp_break_handler;
834         
835         ret = __register_uprobe (&jp->kp, task, atomic,
836                                     (unsigned long) __builtin_return_address (0));
837
838 #ifdef _DEBUG
839         gSilent = 1;
840 #endif
841         return ret;
842 }
843
844 void __kprobes
845 unregister_ujprobe (struct task_struct *task, struct jprobe *jp, int atomic)
846 {
847         unregister_uprobe (&jp->kp, task, atomic);
848 }
849
850 int __kprobes
851 register_jprobe (struct jprobe *jp, int atomic)
852 {
853         /* TODO: Verify probepoint is a function entry point */
854         jp->kp.pre_handler = setjmp_pre_handler;
855         jp->kp.break_handler = longjmp_break_handler;
856
857         return __register_kprobe (&jp->kp, (unsigned long) __builtin_return_address (0), atomic);
858 }
859
860 void __kprobes
861 unregister_jprobe (struct jprobe *jp, int atomic)
862 {
863         unregister_kprobe (&jp->kp, 0, atomic);
864 }
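/*
 * Example (sketch, assuming the usual jprobe ->entry field and
 * jprobe_return() from the kprobes API; names are illustrative): the entry
 * handler mirrors the probed function's prototype and must end with
 * jprobe_return().
 */
#if 0
static void example_jdo_exit (long code)
{
        DBPRINTF ("do_exit(%ld)", code);
        jprobe_return ();       /* mandatory; never returns normally */
}

static struct jprobe example_jp = {
        .entry = (kprobe_opcode_t *) example_jdo_exit,
        .kp = { .symbol_name = "do_exit" },
};

/* register_jprobe (&example_jp, 0); */
#endif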
865
866 /*
867  * This kprobe pre_handler is registered with every kretprobe. When the
868  * probe hits, it sets up the return probe.
869  */
870 static int __kprobes
871 pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs   /*, struct vm_area_struct **vma, 
872                                                                    struct page **page, unsigned long **kaddr */ )
873 {
874         struct kretprobe *rp = container_of (p, struct kretprobe, kp);
875         unsigned long flags = 0;
876         DBPRINTF ("START\n");
877
878         /* TODO: consider swapping the RA only after the last pre_handler has fired */
879         spin_lock_irqsave (&kretprobe_lock, flags);
880         if (!rp->disarm)
881                 __arch_prepare_kretprobe (rp, regs);
882         spin_unlock_irqrestore (&kretprobe_lock, flags);
883         DBPRINTF ("END\n");
884         return 0;
885 }
886
887 struct kretprobe *sched_rp;
888
889 int __kprobes
890 register_kretprobe (struct kretprobe *rp, int atomic)
891 {
892         int ret = 0;
893         struct kretprobe_instance *inst;
894         int i;
895         DBPRINTF ("START");
896
897         rp->kp.pre_handler = pre_handler_kretprobe;
898         rp->kp.post_handler = NULL;
899         rp->kp.fault_handler = NULL;
900         rp->kp.break_handler = NULL;
901
902         rp->disarm = 0;
903
904         /* Pre-allocate memory for max kretprobe instances */
905         if(rp->kp.addr == sched_addr)
906                 rp->maxactive = 1000;//max (100, 2 * NR_CPUS);
907         else if (rp->maxactive <= 0)
908         {
909 #if 1//def CONFIG_PREEMPT
910                 rp->maxactive = max (10, 2 * NR_CPUS);
911 #else
912                 rp->maxactive = NR_CPUS;
913 #endif
914         }
915         INIT_HLIST_HEAD (&rp->used_instances);
916         INIT_HLIST_HEAD (&rp->free_instances);
917         for (i = 0; i < rp->maxactive; i++)
918         {
919                 inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
920                 if (inst == NULL)
921                 {
922                         free_rp_inst (rp);
923                         return -ENOMEM;
924                 }
925                 INIT_HLIST_NODE (&inst->uflist);
926                 hlist_add_head (&inst->uflist, &rp->free_instances);
927         }
928
929         DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
930         rp->nmissed = 0;
931         /* Establish function entry probe point */
932         if ((ret = __register_kprobe (&rp->kp, (unsigned long) __builtin_return_address (0), atomic)) != 0)
933                 free_rp_inst (rp);
934
935         DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
936         if(rp->kp.addr == sched_addr)
937                 sched_rp = rp;
938
939         return ret;
940 }
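/*
 * Example (sketch, assuming the usual kretprobe ->handler callback from
 * kprobes.h; the symbol and names are illustrative). The address is
 * resolved up front with kprobe_lookup_name().
 */
#if 0
static int example_ret_handler (struct kretprobe_instance *ri, struct pt_regs *regs)
{
        DBPRINTF ("probed function at %p returned", ri->rp->kp.addr);
        return 0;
}

static struct kretprobe example_rp = {
        .handler = example_ret_handler,
        .maxactive = 20,        /* cap on concurrently tracked activations */
};

static int example_install (void)
{
        kprobe_lookup_name ("do_fork", example_rp.kp.addr);
        if (!example_rp.kp.addr)
                return -EINVAL;
        return register_kretprobe (&example_rp, 0);
}
#endif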
941
942 void __kprobes
943 unregister_kretprobe (struct kretprobe *rp, int atomic)
944 {
945         unsigned long flags;
946         struct kretprobe_instance *ri;
947
948         //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr, 
949         //               *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
950         unregister_kprobe (&rp->kp, 0, atomic);
951
952         if(rp->kp.addr == sched_addr)
953                 sched_rp = NULL;
954                 
955         //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr, 
956         //               *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
957         /* No race here */
958         spin_lock_irqsave (&kretprobe_lock, flags);
959         while ((ri = get_used_rp_inst (rp)) != NULL)
960         {
961                 ri->rp = NULL;
962                 hlist_del (&ri->uflist);
963         }
964         spin_unlock_irqrestore (&kretprobe_lock, flags);
965         free_rp_inst (rp);
966 }
967
968 int __kprobes
969 register_uretprobe (struct task_struct *task, struct mm_struct *mm, struct kretprobe *rp, int atomic)
970 {
971         int ret = 0;
972         struct kretprobe_instance *inst;
973         /*struct page *pages[2] = {0, 0};
974            struct vm_area_struct *vmas[2] = {0, 0};
975            unsigned long *kaddrs[2] = {0, 0}; */
976         int i;
977 #ifdef _DEBUG
978         gSilent = 0;
979 #endif
980
981         DBPRINTF ("START\n");
982
983         rp->kp.pre_handler = pre_handler_kretprobe;
984         rp->kp.post_handler = NULL;
985         rp->kp.fault_handler = NULL;
986         rp->kp.break_handler = NULL;
987
988         rp->disarm = 0;
989
990         /* Pre-allocate memory for max kretprobe instances */
991         if (rp->maxactive <= 0)
992         {
993 #if 1//def CONFIG_PREEMPT
994                 rp->maxactive = max (10, 2 * NR_CPUS);
995 #else
996                 rp->maxactive = NR_CPUS;
997 #endif
998         }
999         INIT_HLIST_HEAD (&rp->used_instances);
1000         INIT_HLIST_HEAD (&rp->free_instances);
1001         for (i = 0; i < rp->maxactive; i++)
1002         {
1003                 inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
1004                 if (inst == NULL)
1005                 {
1006                         free_rp_inst (rp);
1007                         ret = -ENOMEM;
1008                         goto out;
1009                 }
1010                 INIT_HLIST_NODE (&inst->uflist);
1011                 hlist_add_head (&inst->uflist, &rp->free_instances);
1012         }
1013
1014         rp->nmissed = 0;
1015 #if 0
1016         ret = get_user_pages_uprobe (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1017         if (ret <= 0)
1018         {
1019                 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
1020                 ret = -EFAULT;
1021                 goto out;
1022         }
1023         if (atomic)
1024                 kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1025         else
1026                 kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1027         // if 2nd instruction is on the 2nd page
1028         if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
1029         {
1030           ret = get_user_pages_uprobe (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1031                 if (ret <= 0)
1032                 {
1033                         DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
1034                         ret = -EFAULT;
1035                         goto out;
1036                 }
1037                 if (atomic)
1038                         kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1039                 else
1040                         kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1041         }
1042         else
1043         {
1044                 // 2nd instruction is on the 1st page too
1045                 vmas[1] = vmas[0];
1046                 pages[1] = pages[0];
1047                 kaddrs[1] = kaddrs[0] + 1;
1048         }
1049 #endif
1050         /* Establish function exit probe point */
1051         if ((ret = arch_prepare_uretprobe (rp, task/*vmas, pages, kaddrs */ )) != 0)
1052                 goto out;
1053         /* Establish function entry probe point */
1054         if ((ret = __register_uprobe (&rp->kp, task, atomic,
1055                                          (unsigned long) __builtin_return_address (0))) != 0)
1056         {
1057                 free_rp_inst (rp);
1058                 goto out;
1059         }
1060           
1061         arch_arm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
1062 #if 0
1063         if (atomic)
1064                 set_page_dirty (pages[1]);
1065         else
1066                 set_page_dirty_lock (pages[1]);
1067 #endif
1068       out:
1069 #if 0
1070         if (pages[0])
1071         {
1072                 if (kaddrs[0])
1073                 {
1074                         if (atomic)
1075                                 kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
1076                         else
1077                                 kunmap (pages[0]);
1078                 }
1079                 page_cache_release (pages[0]);
1080         }
1081         if ((pages[0] != pages[1]))
1082         {
1083                 if (pages[1])
1084                 {
1085                         if (kaddrs[1])
1086                         {
1087                                 if (atomic)
1088                                         kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
1089                                 else
1090                                         kunmap (pages[1]);
1091                         }
1092                         page_cache_release (pages[1]);
1093                 }
1094         }
1095         /*else if( (pages[0] != pages[2]) ){
1096            if(pages[2]){
1097            if(kaddrs[2]) {
1098            if (atomic) kunmap_atomic(kaddrs[2], KM_USER1);
1099            else        kunmap(pages[2]);
1100            }
1101            page_cache_release(pages[2]);
1102            }
1103            } */
1104 #endif
1105
1106 #ifdef _DEBUG
1107         gSilent = 1;
1108 #endif
1109         return ret;
1110 }
1111
1112 static struct kretprobe *__kprobes
1113 clone_kretprobe (struct kretprobe *rp)
1114 {
1115         struct kprobe *old_p;
1116         struct kretprobe *clone = NULL;
1117         int ret;
1118
1119         clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL);
1120         if (!clone)
1121         {
1122                 DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);
1123                 return NULL;
1124         }
1125         memcpy (clone, rp, sizeof (struct kretprobe));
1126         clone->kp.pre_handler = pre_handler_kretprobe;
1127         clone->kp.post_handler = NULL;
1128         clone->kp.fault_handler = NULL;
1129         clone->kp.break_handler = NULL;
1130         old_p = get_kprobe (rp->kp.addr, rp->kp.tgid, NULL);
1131         if (old_p)
1132         {
1133                 ret = register_aggr_kprobe (old_p, &clone->kp);
1134                 if (ret)
1135                 {
1136                         kfree (clone);
1137                         return NULL;
1138                 }
1139                 atomic_inc (&kprobe_count);
1140         }
1141
1142         return clone;
1143 }
1144
1145 void __kprobes
1146 unregister_uretprobe (struct task_struct *task, struct kretprobe *rp, int atomic)
1147 {
1148         //int ret = 0;
1149         unsigned long flags;
1150         struct kretprobe_instance *ri;
1151         struct kretprobe *rp2 = NULL;
1152         /*struct mm_struct *mm;
1153            struct page *pages[2] = {0, 0};
1154            struct vm_area_struct *vmas[2] = {0, 0};
1155            unsigned long *kaddrs[2] = {0, 0}; */
1156
1157 #ifdef _DEBUG
1158         gSilent = 0;
1159 #endif
1160 #if 0
1161         mm = atomic ? task->active_mm : get_task_mm (task);
1162         if (!mm)
1163         {
1164                 DBPRINTF ("task %u has no mm!", task->pid);
1165 #ifdef _DEBUG
1166                 gSilent = 1;
1167 #endif
1168                 return;
1169         }
1170         down_read (&mm->mmap_sem);
1171         ret = get_user_pages_uprobe (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
1172
1173         if (ret <= 0)
1174         {
1175                 DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
1176                 goto out;
1177         }
1178         if (atomic)
1179                 kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1180         else
1181                 kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
1182         if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
1183         {
1184           
1185           ret = get_user_pages_uprobe (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
1186                 if (ret <= 0)
1187                 {
1188                         DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
1189                         goto out;
1190                 }
1191                 if (atomic)
1192                         kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1193                 else
1194                         kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
1195         }
1196         else
1197         {
1198                 vmas[1] = vmas[0];
1199                 pages[1] = pages[0];
1200                 kaddrs[1] = kaddrs[0] + 1;
1201         }
1202
1203         /* No race here */
1204         DBPRINTF ("unregister_uretprobe1 addr %p [%lx %lx]", rp->kp.addr, *kaddrs[0], *kaddrs[1]);
1205 #endif
1206         spin_lock_irqsave (&kretprobe_lock, flags);
1207         if (hlist_empty (&rp->used_instances))
1208         {
1209                 // if there are no used retprobe instances (i.e. the function has not been entered), disarm the retprobe
1210                 arch_disarm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
1211 #if 0
1212                 if (atomic)
1213                         set_page_dirty (pages[1]);
1214                 else
1215                         set_page_dirty_lock (pages[1]);
1216 #endif
1217         }
1218         else
1219         {
1220                 rp2 = clone_kretprobe (rp);
1221                 if (!rp2)
1222                         DBPRINTF ("unregister_uretprobe addr %p: failed to clone retprobe!", rp->kp.addr);
1223                 else
1224                 {
1225                         DBPRINTF ("initiating deferred retprobe deletion addr %p", rp->kp.addr);
1226                         printk ("initiating deferred retprobe deletion addr %p\n", rp->kp.addr);
1227                         rp2->disarm = 1;
1228                 }
1229         }
1230
1231         while ((ri = get_used_rp_inst (rp)) != NULL)
1232         {
1233                 ri->rp = NULL;
1234                 ri->rp2 = rp2;
1235                 hlist_del (&ri->uflist);
1236         }
1237         spin_unlock_irqrestore (&kretprobe_lock, flags);
1238         free_rp_inst (rp);
1239
1240         unregister_uprobe (&rp->kp, task, atomic);
1241         //DBPRINTF("unregister_uretprobe3 addr %p [%lx %lx]", 
1242         //              rp->kp.addr, *kaddrs[0], *kaddrs[1]);
1243 #if 0
1244       out:
1245         if (pages[0])
1246         {
1247                 if (kaddrs[0])
1248                 {
1249                         if (atomic)
1250                                 kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
1251                         else
1252                                 kunmap (pages[0]);
1253                 }
1254                 page_cache_release (pages[0]);
1255         }
1256         if (pages[1] && (pages[0] != pages[1]))
1257         {
1258                 if (kaddrs[1])
1259                 {
1260                         if (atomic)
1261                                 kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
1262                         else
1263                                 kunmap (pages[1]);
1264                 }
1265                 page_cache_release (pages[1]);
1266         }
1267         if (!atomic)
1268         {
1269                 up_read (&mm->mmap_sem);
1270                 mmput (mm);
1271         }
1272 #endif
1273 #ifdef _DEBUG
1274         gSilent = 1;
1275 #endif
1276 }
1277
1278 void __kprobes
1279 unregister_all_uprobes (struct task_struct *task, int atomic)
1280 {
1281         struct hlist_head *head;
1282         struct hlist_node *node, *tnode;
1283         struct kprobe *p;
1284         int i;
1285
1286         for(i = 0; i < KPROBE_TABLE_SIZE; i++){
1287                 head = &kprobe_table[i];
1288                 hlist_for_each_entry_safe (p, node, tnode, head, hlist){                        
1289                         if(p->tgid == task->tgid){
1290                                 printk("unregister_all_uprobes: delete uprobe at %pf for %s/%d\n", p->addr, task->comm, task->pid);
1291                                 unregister_uprobe (p, task, atomic);
1292                         }
1293                 }
1294         }
1295         purge_garbage_uslots(task, atomic);
1296 }
1297
1298
1299 #define GUP_FLAGS_WRITE                  0x1
1300 #define GUP_FLAGS_FORCE                  0x2
1301 #define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
1302 #define GUP_FLAGS_IGNORE_SIGKILL         0x8
1303
1304 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
1305 static inline int use_zero_page(struct vm_area_struct *vma)
1306 {
1307         /*
1308          * We don't want to optimize FOLL_ANON for make_pages_present()
1309          * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
1310          * we want to get the page from the page tables to make sure
1311          * that we serialize and update with any other user of that
1312          * mapping.
1313          */
1314         if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
1315                 return 0;
1316         /*
1317          * And if we have a fault routine, it's not an anonymous region.
1318          */
1319         return !vma->vm_ops || !vma->vm_ops->fault;
1320 }
1321 #endif
1322
1323 int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1324                      unsigned long start, int len, int flags,
1325                 struct page **pages, struct vm_area_struct **vmas)
1326 {
1327         int i;
1328         unsigned int vm_flags = 0;
1329         int write = !!(flags & GUP_FLAGS_WRITE);
1330         int force = !!(flags & GUP_FLAGS_FORCE);
1331         int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
1332         int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
1333
1334         if (len <= 0)
1335                 return 0;
1336         /* 
1337          * Require read or write permissions.
1338          * If 'force' is set, we only require the "MAY" flags.
1339          */
1340         vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1341         vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1342         i = 0;
1343
1344         do {
1345                 struct vm_area_struct *vma;
1346                 unsigned int foll_flags;
1347
1348                 //vma = find_extend_vma(mm, start);
1349                 vma = find_vma(mm, start);
1350                 if (!vma && in_gate_area(tsk, start)) {
1351                         unsigned long pg = start & PAGE_MASK;
1352                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
1353                         pgd_t *pgd;
1354                         pud_t *pud;
1355                         pmd_t *pmd;
1356                         pte_t *pte;
1357
1358                         /* user gate pages are read-only */
1359                         if (!ignore && write)
1360                                 return i ? : -EFAULT;
1361                         if (pg > TASK_SIZE)
1362                                 pgd = pgd_offset_k(pg);
1363                         else
1364                                 pgd = pgd_offset_gate(mm, pg);
1365                         BUG_ON(pgd_none(*pgd));
1366                         pud = pud_offset(pgd, pg);
1367                         BUG_ON(pud_none(*pud));
1368                         pmd = pmd_offset(pud, pg);
1369                         if (pmd_none(*pmd))
1370                                 return i ? : -EFAULT;
1371                         pte = pte_offset_map(pmd, pg);
1372                         if (pte_none(*pte)) {
1373                                 pte_unmap(pte);
1374                                 return i ? : -EFAULT;
1375                         }
1376                         if (pages) {
1377                                 struct page *page = vm_normal_page(gate_vma, start, *pte);
1378                                 pages[i] = page;
1379                                 if (page)
1380                                         get_page(page);
1381                         }
1382                         pte_unmap(pte);
1383                         if (vmas)
1384                                 vmas[i] = gate_vma;
1385                         i++;
1386                         start += PAGE_SIZE;
1387                         len--;
1388                         continue;
1389                 }
1390
1391                 if (!vma ||
1392                     (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1393                     (!ignore && !(vm_flags & vma->vm_flags)))
1394                         return i ? : -EFAULT;
1395
1396                 if (is_vm_hugetlb_page(vma)) {
1397 #if  LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1398                         i = follow_hugetlb_page(mm, vma, pages, vmas,
1399                                                 &start, &len, i);
1400 #else
1401                         i = follow_hugetlb_page(mm, vma, pages, vmas,
1402                                                 &start, &len, i, write);
1403 #endif
1404                         continue;
1405                 }
1406
1407                 foll_flags = FOLL_TOUCH;
1408                 if (pages)
1409                         foll_flags |= FOLL_GET;
1410
1411 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
1412 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
1413                 if (!write && use_zero_page(vma))
1414                   foll_flags |= FOLL_ANON;
1415 #endif
1416 #endif
1417
1418                 do {
1419                         struct page *page;
1420
1421 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
1422                         /*
1423                          * If we have a pending SIGKILL, don't keep faulting
1424                          * pages and potentially allocating memory, unless
1425                          * current is handling munlock--e.g., on exit. In
1426                          * that case, we are not allocating memory.  Rather,
1427                          * we're only unlocking already resident/mapped pages.
1428                          */
1429                         if (unlikely(!ignore_sigkill &&
1430                                         fatal_signal_pending(current)))
1431                                 return i ? i : -ERESTARTSYS;
1432 #endif
1433
1434                         if (write)
1435                                 foll_flags |= FOLL_WRITE;
1436
			//cond_resched();

			DBPRINTF ("pages = %p vma = %p\n", pages, vma);
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				ret = handle_mm_fault(mm, vma, start,
						foll_flags & FOLL_WRITE);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
				if (ret & VM_FAULT_WRITE)
					foll_flags &= ~FOLL_WRITE;

				switch (ret & ~VM_FAULT_WRITE) {
				case VM_FAULT_MINOR:
					tsk->min_flt++;
					break;
				case VM_FAULT_MAJOR:
					tsk->maj_flt++;
					break;
				case VM_FAULT_SIGBUS:
					return i ? i : -EFAULT;
				case VM_FAULT_OOM:
					return i ? i : -ENOMEM;
				default:
					BUG();
				}
#else
				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					else if (ret & VM_FAULT_SIGBUS)
						return i ? i : -EFAULT;
					BUG();
				}
				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				//cond_resched();
#endif
			}

			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
				flush_anon_page(page, start);
#else
				flush_anon_page(vma, page, start);
#endif
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
	} while (len);
	return i;
}

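/*
 * get_user_pages_uprobe() - thin wrapper around __get_user_pages_uprobe()
 * that translates the write/force arguments into GUP_FLAGS_* bits, mirroring
 * the kernel's own get_user_pages() entry point.  Unlike the stock helper it
 * avoids the paths that may reschedule, so it can back the *_atomic
 * accessors below (a summary of the code above, not a guarantee for every
 * kernel version this file supports).
 */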
int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= GUP_FLAGS_WRITE;
	if (force)
		flags |= GUP_FLAGS_FORCE;

	return __get_user_pages_uprobe(tsk, mm,
				start, len, flags,
				pages, vmas);
}

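/*
 * access_process_vm_atomic() - copy data to or from another task's address
 * space, one page at a time, faulting pages in via get_user_pages_uprobe().
 * Returns the number of bytes actually transferred, which may be short on
 * error; callers are expected to compare it against the requested length.
 */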
int
access_process_vm_atomic (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		struct page *page = NULL;

		ret = get_user_pages_uprobe(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0) {
			/*
			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
			 * we can access using slightly different code.
			 */
#ifdef CONFIG_HAVE_IOREMAP_PROT
			vma = find_vma(mm, addr);
			if (!vma)
				break;
			if (vma->vm_ops && vma->vm_ops->access)
				ret = vma->vm_ops->access(vma, addr, buf,
							  len, write);
			if (ret <= 0)
#endif
				break;
			bytes = ret;
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE-1);
			if (bytes > PAGE_SIZE-offset)
				bytes = PAGE_SIZE-offset;

			maddr = kmap(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			kunmap(page);
			page_cache_release(page);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}

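/*
 * Usage sketch (illustrative only, not part of the module): reading the
 * original instruction word at a probe address in a traced task.  "task"
 * and "vaddr" are hypothetical caller-supplied values; kprobe_opcode_t
 * comes from kprobes.h.
 *
 *	kprobe_opcode_t insn;
 *	int copied = access_process_vm_atomic(task, vaddr, &insn,
 *					      sizeof(insn), 0);
 *	if (copied != sizeof(insn))
 *		return -EFAULT;
 */
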
#ifdef CONFIG_DEBUG_FS
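/*
 * kallsyms_lookup() is not exported to modules on the kernels this file
 * targets, so init_kprobes() resolves the real symbol at run time via
 * kallsyms_search() and stashes it in __real_kallsyms_lookup; the local
 * kallsyms_lookup() below simply forwards to it, shadowing the kernel
 * symbol inside this module.
 */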
const char *(*__real_kallsyms_lookup) (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf);
const char *
kallsyms_lookup (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf)
{
	return __real_kallsyms_lookup (addr, symbolsize, offset, modname, namebuf);
}

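/*
 * report_probe() - emit one line of the debugfs "list" file.  The second
 * column encodes the probe type: k/j/r for kernel kprobes, jprobes and
 * kretprobes, with a "u" prefix for their user-space counterparts.  An
 * illustrative line (the address is fictional):
 *
 *	c0123456  r  sys_open+0x0  [module]
 */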
static void __kprobes
report_probe (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe) {
		if (p->tgid)
			kprobe_type = "ur";
		else
			kprobe_type = "r";
	} else if (p->pre_handler == setjmp_pre_handler) {
		if (p->tgid)
			kprobe_type = "uj";
		else
			kprobe_type = "j";
	} else if (p->tgid) {
		kprobe_type = "u";
	} else {
		kprobe_type = "k";
	}

	if (sym)
		seq_printf (pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type, sym, offset, (modname ? modname : " "));
	else
		seq_printf (pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

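/*
 * The seq_file cursor is simply the kprobe_table bucket index: start/next
 * walk 0..KPROBE_TABLE_SIZE-1 and show_kprobe_addr() prints every probe
 * hashed into the current bucket.
 */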
static void __kprobes *
kprobe_seq_start (struct seq_file *f, loff_t * pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *
kprobe_seq_next (struct seq_file *f, void *v, loff_t * pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes
kprobe_seq_stop (struct seq_file *f, void *v)
{
	/* Nothing to do */
}

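/*
 * Book-keeping for a single user-space probe point; the commented-out code
 * in show_kprobe_addr() below recovers it from its embedded jprobe or
 * retprobe via container_of().
 */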
struct us_proc_ip
{
	char *name;
	int installed;
	struct jprobe jprobe;
	struct kretprobe retprobe;
	unsigned long offset;
};

static int __kprobes
show_kprobe_addr (struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long size, offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable ();
	hlist_for_each_entry_rcu (p, node, head, hlist)
	{
		/*if(p->pid){
		   struct us_proc_ip *up = NULL;
		   if (p->pre_handler == pre_handler_kretprobe){
		   struct kretprobe *rp = container_of(p, struct kretprobe, kp);
		   up = container_of(rp, struct us_proc_ip, retprobe);
		   }
		   else {//if (p->pre_handler == setjmp_pre_handler){
		   struct jprobe *jp = container_of(p, struct jprobe, kp);
		   up = container_of(jp, struct us_proc_ip, jprobe);
		   }
		   if(up){
		   sym = up->name;
		   printk("show_kprobe_addr: %s\n", sym);
		   }
		   }
		   else */
		sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler)
		{
			list_for_each_entry_rcu (kp, &p->list, list)
				report_probe (pi, kp, sym, offset, modname);
		}
		else
			report_probe (pi, p, sym, offset, modname);
	}
	//seq_printf (pi, "handled exceptions %lu\n", handled_exceptions);
	preempt_enable ();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next = kprobe_seq_next,
	.stop = kprobe_seq_stop,
	.show = show_kprobe_addr
};

static int __kprobes
kprobes_open (struct inode *inode, struct file *filp)
{
	return seq_open (filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open = kprobes_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
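
/*
 * With debugfs mounted at its usual location, the probe list can be read
 * with e.g. "cat /sys/kernel/debug/kprobes/list" once debugfs_kprobe_init()
 * has run (the mount point is an assumption about the target system).
 */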

#ifdef KPROBES_PROFILE
extern unsigned long nCount;
extern struct timeval probe_enter_diff_sum;
static void __kprobes *
kprobe_prof_seq_start (struct seq_file *f, loff_t * pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *
kprobe_prof_seq_next (struct seq_file *f, void *v, loff_t * pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes
kprobe_prof_seq_stop (struct seq_file *f, void *v)
{
}

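/*
 * report_probe_prof() - like report_probe(), but the trailing column is the
 * average handler time per hit, computed as hnd_tm_sum / count.
 * show_kprobe_prof() additionally accumulates per-kind totals in static
 * variables and prints the averages (then resets them) once the last hash
 * bucket has been shown.
 */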
static void __kprobes
report_probe_prof (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe) {
		if (p->pid)
			kprobe_type = "ur";
		else
			kprobe_type = "r";
	} else if (p->pre_handler == setjmp_pre_handler) {
		if (p->pid)
			kprobe_type = "uj";
		else
			kprobe_type = "j";
	} else if (p->pid) {
		kprobe_type = "u";
	} else {
		kprobe_type = "k";
	}

	if (sym)
		seq_printf (pi, "%p  %s  %s+0x%x  %s %lu.%06ld\n", p->addr, kprobe_type,
			    sym, offset, (modname ? modname : " "), p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
	else
		seq_printf (pi, "%p  %s  %p %lu.%06ld\n", p->addr, kprobe_type, p->addr, p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
}

static int __kprobes
show_kprobe_prof (struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;	//, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long size, offset = 0;
	char *modname, namebuf[128];
	static struct timeval utv, ktv;
	static unsigned long ucount, kcount;

	head = &kprobe_table[i];
	preempt_disable ();
	hlist_for_each_entry_rcu (p, node, head, hlist)
	{
		sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
		/*if (p->pre_handler == aggr_pre_handler) {
		   list_for_each_entry_rcu(kp, &p->list, list)
		   report_probe_prof(pi, kp, sym, offset, modname);
		   } else */
		report_probe_prof (pi, p, sym, offset, modname);
		if (p->count)
		{
			if (p->pid)
			{
				set_normalized_timeval (&utv, utv.tv_sec + p->hnd_tm_sum.tv_sec, utv.tv_usec + p->hnd_tm_sum.tv_usec);
				ucount += p->count;
			}
			else
			{
				//seq_printf(pi, "kernel probe handling %lu %lu.%06ld\n",
				//		p->count, p->hnd_tm_sum.tv_sec, p->hnd_tm_sum.tv_usec);
				//seq_printf(pi, "kernel probe handling2 %lu %lu.%06ld\n",
				//		kcount, ktv.tv_sec, ktv.tv_usec);
				set_normalized_timeval (&ktv, ktv.tv_sec + p->hnd_tm_sum.tv_sec, ktv.tv_usec + p->hnd_tm_sum.tv_usec);
				kcount += p->count;
				//seq_printf(pi, "kernel probe handling3 %lu %lu.%06ld\n",
				//		kcount, ktv.tv_sec, ktv.tv_usec);
			}
		}
	}
	if (i == (KPROBE_TABLE_SIZE - 1))
	{
		seq_printf (pi, "Average kernel probe handling %lu.%06ld\n", kcount ? ktv.tv_sec / kcount : 0, kcount ? ktv.tv_usec / kcount : 0);
		seq_printf (pi, "Average user probe handling %lu.%06ld\n", ucount ? utv.tv_sec / ucount : 0, ucount ? utv.tv_usec / ucount : 0);
		seq_printf (pi, "Average probe period %lu.%06ld\n", nCount ? probe_enter_diff_sum.tv_sec / nCount : 0, nCount ? probe_enter_diff_sum.tv_usec / nCount : 0);
		utv.tv_sec = utv.tv_usec = ktv.tv_sec = ktv.tv_usec = 0;
		ucount = kcount = 0;
	}
	preempt_enable ();
	return 0;
}

static struct seq_operations kprobes_prof_seq_ops = {
	.start = kprobe_prof_seq_start,
	.next = kprobe_prof_seq_next,
	.stop = kprobe_prof_seq_stop,
	.show = show_kprobe_prof
};

static int __kprobes
kprobes_prof_open (struct inode *inode, struct file *filp)
{
	return seq_open (filp, &kprobes_prof_seq_ops);
}

static struct file_operations debugfs_kprobes_prof_operations = {
	.open = kprobes_prof_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif

int __kprobes debugfs_kprobe_init (void);
static struct dentry *dbg_dir, *dbg_file;
#ifdef KPROBES_PROFILE
static struct dentry *dbg_file_prof;
#endif

int __kprobes
debugfs_kprobe_init (void)
{
	dbg_dir = debugfs_create_dir ("kprobes", NULL);
	if (!dbg_dir)
		return -ENOMEM;

	dbg_file = debugfs_create_file ("list", 0444, dbg_dir, NULL, &debugfs_kprobes_operations);
	if (!dbg_file)
	{
		debugfs_remove (dbg_dir);
		dbg_dir = NULL;
		return -ENOMEM;
	}

#ifdef KPROBES_PROFILE
	dbg_file_prof = debugfs_create_file ("prof", 0444, dbg_dir, NULL, &debugfs_kprobes_prof_operations);
	if (!dbg_file_prof)
	{
		debugfs_remove (dbg_file);
		debugfs_remove (dbg_dir);
		dbg_dir = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}

//late_initcall(debugfs_kprobe_init);
extern unsigned long (*kallsyms_search) (const char *name);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_X86)
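/*
 * Registered with the highest priority so the kprobes notifier sees die
 * events before any other consumer gets a chance to run.
 */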
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = INT_MAX
};
#endif

static int __init
init_kprobes (void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
	{
		INIT_HLIST_HEAD (&kprobe_table[i]);
		INIT_HLIST_HEAD (&kretprobe_inst_table[i]);
		INIT_HLIST_HEAD (&uprobe_insn_slot_table[i]);
	}
	atomic_set (&kprobe_count, 0);

	err = arch_init_kprobes ();

	DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);
#if defined(CONFIG_X86)
	if (!err)
		err = register_die_notifier (&kprobe_exceptions_nb);
	DBPRINTF ("init_kprobes: register_die_notifier - %d", err);
#endif // CONFIG_X86

#ifdef CONFIG_DEBUG_FS
	if (!err)
	{
		__real_kallsyms_lookup = (void *) kallsyms_search ("kallsyms_lookup");
		if (!__real_kallsyms_lookup)
		{
			DBPRINTF ("kallsyms_lookup is not found! Oops. Where is the kernel?");
			return -ESRCH;
		}
		err = debugfs_kprobe_init ();
		DBPRINTF ("init_kprobes: debugfs_kprobe_init - %d", err);
	}
#endif /* CONFIG_DEBUG_FS */

	return err;
}

static void __exit
exit_kprobes (void)
{
#ifdef CONFIG_DEBUG_FS
#ifdef KPROBES_PROFILE
	if (dbg_file_prof)
		debugfs_remove (dbg_file_prof);
#endif
	if (dbg_file)
		debugfs_remove (dbg_file);
	if (dbg_dir)
		debugfs_remove (dbg_dir);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_X86)
	unregister_die_notifier (&kprobe_exceptions_nb);
#endif // CONFIG_X86
	arch_exit_kprobes ();
}

module_init (init_kprobes);
module_exit (exit_kprobes);

EXPORT_SYMBOL_GPL (register_kprobe);
EXPORT_SYMBOL_GPL (unregister_kprobe);
EXPORT_SYMBOL_GPL (register_jprobe);
EXPORT_SYMBOL_GPL (unregister_jprobe);
EXPORT_SYMBOL_GPL (register_ujprobe);
EXPORT_SYMBOL_GPL (unregister_ujprobe);
EXPORT_SYMBOL_GPL (jprobe_return);
EXPORT_SYMBOL_GPL (uprobe_return);
EXPORT_SYMBOL_GPL (register_kretprobe);
EXPORT_SYMBOL_GPL (unregister_kretprobe);
EXPORT_SYMBOL_GPL (register_uretprobe);
EXPORT_SYMBOL_GPL (unregister_uretprobe);
EXPORT_SYMBOL_GPL (unregister_all_uprobes);
EXPORT_SYMBOL_GPL (access_process_vm_atomic);
#if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,23)
EXPORT_SYMBOL_GPL (access_process_vm);
#endif
#ifdef KERNEL_HAS_ISPAGEPRESENT
EXPORT_SYMBOL_GPL (is_page_present);
#else
EXPORT_SYMBOL_GPL (page_present);
#endif