// src_kprobes.c


#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#include <linux/config.h>
#endif

#include <asm/types.h>

#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
//#include <linux/freezer.h>
#include <linux/seq_file.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/highmem.h>      // kmap_atomic, kunmap_atomic, copy_from_user_page, copy_to_user_page
#include <linux/pagemap.h>      // page_cache_release
#include <linux/vmalloc.h>      // vmalloc, vfree
#if defined(CONFIG_X86)
#include <linux/kdebug.h>       // register_die_notifier, unregister_die_notifier
#endif
#include <linux/hugetlb.h>      // follow_hugetlb_page, is_vm_hugetlb_page

#include "kprobes.h"

//#define arch_remove_kprobe(p) do { } while (0)

#ifdef _DEBUG
extern int nCount;
#endif

/*
static spinlock_t die_notifier_lock = SPIN_LOCK_UNLOCKED;

int src_register_die_notifier(struct notifier_block *nb)
{
        int err = 0;
        unsigned long flags;

        spin_lock_irqsave(&die_notifier_lock, flags);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
        err = atomic_notifier_chain_register(&panic_notifier_list, nb);
#else
        err = notifier_chain_register(&panic_notifier_list, nb);
#endif
        spin_unlock_irqrestore(&die_notifier_lock, flags);

        return err;
}
*/

int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                          unsigned long start, int len, int write, int force,
                          struct page **pages, struct vm_area_struct **vmas);
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void
src_hlist_replace_rcu (struct hlist_node *old, struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        smp_wmb ();
        if (next)
                new->next->pprev = &new->next;
        if (new->pprev)
                *new->pprev = new;
        old->pprev = LIST_POISON2;
}
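
/*
 * Ordering note (explanatory, added as a sketch): the smp_wmb() above
 * guarantees that @new is fully initialized (->next and ->pprev set)
 * before it becomes reachable from the list. A lockless RCU reader
 * walking the chain concurrently therefore sees either @old or a
 * completely initialized @new, never a half-built node.
 */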

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static struct hlist_head uprobe_insn_slot_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

//DEFINE_MUTEX(kprobe_mutex);           /* Protects kprobe_table */
DEFINE_SPINLOCK (kretprobe_lock);       /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU (struct kprobe *, kprobe_instance) = NULL;
unsigned long handled_exceptions;

/* We have preemption disabled.. so it is safe to use __ versions */
static inline void
set_kprobe_instance (struct kprobe *kp)
{
        __get_cpu_var (kprobe_instance) = kp;
}

static inline void
reset_kprobe_instance (void)
{
        __get_cpu_var (kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *
get_kprobe (void *addr, int tgid, struct task_struct *ctask)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *retVal = NULL;
        int ret = 0, uprobe_found;
        struct page *page = NULL, *tpage = NULL;
        struct vm_area_struct *vma = NULL;
        struct task_struct *task = NULL;
        void *paddr = NULL;


        if (ctask && ctask->active_mm)
        {
                ret = get_user_pages_uprobe (ctask, ctask->active_mm, (unsigned long) addr, 1, 0, 0, &tpage, NULL);
                if (ret <= 0)
                        DBPRINTF ("get_user_pages for task %d at %p failed!", current->pid, addr);
                else
                {
                        paddr = page_address (tpage);
                        page_cache_release (tpage);
                }
        }
        //else
        //      DBPRINTF("task %d has no mm!", ctask->pid);

        //TODO: test - two processes invoke the same instrumented function
        head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu (p, node, head, hlist)
        {
                // match either a kernel probe with the same addr, or
                // a user space probe with the same addr and pid
                DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->addr, tgid, p->tgid);
                if (p->addr == addr)
                {
                        uprobe_found = 0;
                        if (tgid == p->tgid)
                                uprobe_found = 1;
                        if (!tgid || uprobe_found)
                        {
                                retVal = p;
                                if (tgid)
                                        DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
                                else
                                        DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
                                break;
                        }
                }
                else if (tgid != p->tgid)
                {
                        // if looking for a user space probe and this is a user space probe
                        // with another addr and pid but with the same offset within the page,
                        // it could be the same probe (with an address from another user address space);
                        // we should handle it as a usual probe but without notification to user
                        if (paddr && tgid && (((unsigned long) addr & ~PAGE_MASK) == ((unsigned long) p->addr & ~PAGE_MASK))
                            && p->tgid)
                        {
                                DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d. possibly for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
                                // this probe has the same offset in the page
                                // look in the probes for the other pids
                                // get page for user space probe addr
                                rcu_read_lock ();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
                                task = find_task_by_pid (p->tgid);
#else // LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
                                task = pid_task(find_pid_ns(p->tgid, &init_pid_ns), PIDTYPE_PID);
#endif
                                if (task)
                                        get_task_struct (task);
                                rcu_read_unlock ();
                                if (!task)
                                {
                                        DBPRINTF ("task for pid %d not found! Dead probe?", p->tgid);
                                        continue;
                                }
                                if (task->active_mm)
                                {
                                        if (page_present (task->active_mm, (unsigned long) p->addr))
                                        {
                                                ret = get_user_pages_uprobe (task, task->active_mm, (unsigned long) p->addr, 1, 0, 0, &page, &vma);
                                                if (ret <= 0)
                                                        DBPRINTF ("get_user_pages for task %d at %p failed!", p->tgid, p->addr);
                                        }
                                        else
                                                ret = -1;
                                }
                                else
                                {
                                        DBPRINTF ("task %d has no mm!", task->pid);
                                        ret = -1;
                                }
                                put_task_struct (task);
                                if (ret <= 0)
                                        continue;
                                if (paddr == page_address (page))
                                {
                                        retVal = p;     // we found the probe in another process's address space
                                        DBPRINTF ("get_kprobe[%d]: found user space probe at %p in task %d for addr %p in task %d", nCount, p->addr, p->tgid, addr, tgid);
                                        panic ("user space probe from another process");
                                }
                                page_cache_release (page);
                                if (retVal)
                                        break;
                        }
                }
        }

        DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
        return retVal;
}
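
/*
 * Usage sketch (illustrative only): a kernel-side lookup passes tgid == 0
 * and no task, e.g.
 *
 *      struct kprobe *p = get_kprobe (addr, 0, NULL);
 *
 * while a user space lookup passes the traced task's tgid and, optionally,
 * the current task so the probed page can be resolved through
 * get_user_pages_uprobe () for the cross-address-space match above.
 */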

struct kprobe __kprobes *
get_kprobe_by_insn_slot (void *addr, int tgid, struct task_struct *ctask)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *retVal = NULL;
        int uprobe_found;

        //TODO: test - two processes invoke the same instrumented function
        head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu (p, node, head, is_hlist)
        {
                // match either a kernel probe with the same addr, or
                // a user space probe with the same addr and pid
                DBPRINTF ("get_kprobe[%d]: check probe at %p/%p, task %d/%d", nCount, addr, p->ainsn.insn, tgid, p->tgid);
                if (p->ainsn.insn == addr)
                {
                        uprobe_found = 0;
                        if (tgid == p->tgid)
                                uprobe_found = 1;
                        if (!tgid || uprobe_found)
                        {
                                retVal = p;
                                if (tgid)
                                        DBPRINTF ("get_kprobe[%d]: found user space probe at %p for task %d", nCount, p->addr, p->tgid);
                                else
                                        DBPRINTF ("get_kprobe[%d]: found kernel probe at %p", nCount, p->addr);
                                break;
                        }
                }
        }

        DBPRINTF ("get_kprobe[%d]: probe %p", nCount, retVal);
        return retVal;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes
aggr_pre_handler (struct kprobe *p, struct pt_regs *regs        /*,
                                                                   struct vm_area_struct **vma,
                                                                   struct page **page, unsigned long **kaddr */ )
{
        struct kprobe *kp;
        int ret;

        list_for_each_entry_rcu (kp, &p->list, list)
        {
                if (kp->pre_handler)
                {
                        set_kprobe_instance (kp);
                        ret = kp->pre_handler (kp, regs);
                        if (ret)
                                return ret;
                }
                reset_kprobe_instance ();
        }
        return 0;
}

static void __kprobes
aggr_post_handler (struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu (kp, &p->list, list)
        {
                if (kp->post_handler)
                {
                        set_kprobe_instance (kp);
                        kp->post_handler (kp, regs, flags);
                        reset_kprobe_instance ();
                }
        }
        return;
}

#if 1
static int __kprobes
aggr_fault_handler (struct kprobe *p, struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = __get_cpu_var (kprobe_instance);

        /*
         * if we faulted "during" the execution of a user specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler)
        {
                if (cur->fault_handler (cur, regs, trapnr))
                        return 1;
        }
        return 0;
}
#endif

static int __kprobes
aggr_break_handler (struct kprobe *p, struct pt_regs *regs      /*,
                                                                   struct vm_area_struct **vma,
                                                                   struct page **page, unsigned long **kaddr */ )
{
        struct kprobe *cur = __get_cpu_var (kprobe_instance);
        int ret = 0;
        DBPRINTF ("cur = 0x%p\n", cur);
        if (cur)
                DBPRINTF ("cur = 0x%p cur->break_handler = 0x%p\n", cur, cur->break_handler);

        if (cur && cur->break_handler)
        {
                if (cur->break_handler (cur, regs /*, vma, page, kaddr */ ))
                        ret = 1;
        }
        reset_kprobe_instance ();
        return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes
kprobes_inc_nmissed_count (struct kprobe *p)
{
        struct kprobe *kp;
        if (p->pre_handler != aggr_pre_handler)
        {
                p->nmissed++;
        }
        else
        {
                list_for_each_entry_rcu (kp, &p->list, list) kp->nmissed++;
        }
        return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *
get_free_rp_inst (struct kretprobe *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;
        hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
                return ri;
        return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *
get_used_rp_inst (struct kretprobe *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;
        hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
        return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes
add_rp_inst (struct kretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del (&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE (&ri->hlist);
        hlist_add_head (&ri->hlist, &kretprobe_inst_table[hash_ptr (ri->task, KPROBE_HASH_BITS)]);

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE (&ri->uflist);
        hlist_add_head (&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes
recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
{
        /* remove rp inst off the rprobe_inst_table */
        hlist_del (&ri->hlist);
        if (ri->rp)
        {
                /* remove rp inst off the used list */
                hlist_del (&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE (&ri->uflist);
                hlist_add_head (&ri->uflist, &ri->rp->free_instances);
        }
        else
                /* Unregistering */
                hlist_add_head (&ri->hlist, head);
}
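
/*
 * Instance lifecycle summary (explanatory note): each kretprobe_instance
 * cycles
 *
 *      free_instances --add_rp_inst()--> kretprobe_inst_table + used_instances
 *                     <--recycle_rp_inst()-- (probed function returned)
 *
 * or, when the kretprobe is being unregistered (ri->rp == NULL), onto the
 * caller-supplied @head list for deferred kfree. All transitions happen
 * under kretprobe_lock.
 */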

struct hlist_head __kprobes *
kretprobe_inst_table_head (struct task_struct *tsk)
{
        return &kretprobe_inst_table[hash_ptr (tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
/*void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;

        INIT_HLIST_HEAD(&empty_rp);
        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(tk);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}*/

static inline void
free_rp_inst (struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        while ((ri = get_free_rp_inst (rp)) != NULL)
        {
                hlist_del (&ri->uflist);
                kfree (ri);
        }
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void
copy_kprobe (struct kprobe *old_p, struct kprobe *p)
{
        memcpy (&p->opcode, &old_p->opcode, sizeof (kprobe_opcode_t));
        memcpy (&p->ainsn, &old_p->ainsn, sizeof (struct arch_specific_insn));
        p->tgid = old_p->tgid;
        p->ss_addr = old_p->ss_addr;
        //p->spid = old_p->spid;
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes
add_new_kprobe (struct kprobe *old_p, struct kprobe *p)
{
        if (p->break_handler)
        {
                if (old_p->break_handler)
                        return -EEXIST;
                list_add_tail_rcu (&p->list, &old_p->list);
                old_p->break_handler = aggr_break_handler;
        }
        else
                list_add_rcu (&p->list, &old_p->list);
        if (p->post_handler && !old_p->post_handler)
                old_p->post_handler = aggr_post_handler;
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void
add_aggr_kprobe (struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe (p, ap);
        flush_insn_slot (ap);
        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        if (p->post_handler)
                ap->post_handler = aggr_post_handler;
        if (p->break_handler)
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD (&ap->list);
        list_add_rcu (&p->list, &ap->list);

        src_hlist_replace_rcu (&p->hlist, &ap->hlist);
}
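
/*
 * Resulting layout (explanatory sketch): after aggregation the hash table
 * entry points at the manager kprobe, and the individual probes hang off
 * its ->list:
 *
 *      kprobe_table[h] -> ap (aggr_* handlers)
 *                          `-> p1 -> p2 -> ...  (individual handlers)
 *
 * so one breakpoint at the address fans out to every registered handler.
 */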

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes
register_aggr_kprobe (struct kprobe *old_p, struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;
        DBPRINTF ("start\n");

        DBPRINTF ("p = %p old_p = %p \n", p, old_p);
        if (old_p->pre_handler == aggr_pre_handler)
        {
                DBPRINTF ("aggr_pre_handler \n");

                copy_kprobe (old_p, p);
                ret = add_new_kprobe (old_p, p);
        }
        else
        {
                DBPRINTF ("kzalloc\n");

#ifdef kzalloc
                ap = kzalloc (sizeof (struct kprobe), GFP_KERNEL);
#else
                ap = kmalloc (sizeof (struct kprobe), GFP_KERNEL);
                if (ap)
                        memset (ap, 0, sizeof (struct kprobe));
#endif
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe (ap, old_p);
                copy_kprobe (ap, p);
                DBPRINTF ("ap = %p p = %p old_p = %p \n", ap, p, old_p);
                ret = add_new_kprobe (ap, p);
        }
        return ret;
}

static int __kprobes
__register_kprobe (struct kprobe *p, unsigned long called_from, int atomic)
{
        struct kprobe *old_p;
//      struct module *probed_mod;
        int ret = 0;
        /*
         * If we have a symbol_name argument look it up,
         * and add it to the address.  That way the addr
         * field can either be global or relative to a symbol.
         */
        if (p->symbol_name)
        {
                if (p->addr)
                        return -EINVAL;
                kprobe_lookup_name (p->symbol_name, p->addr);
        }

        if (!p->addr)
                return -EINVAL;
        DBPRINTF ("p->addr = 0x%p\n", p->addr);
        p->addr = (kprobe_opcode_t *) (((char *) p->addr) + p->offset);
        DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);

/*      if ((!kernel_text_address((unsigned long) p->addr)) ||
                in_kprobes_functions((unsigned long) p->addr))
                return -EINVAL;*/

#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif
        p->mod_refcounted = 0;
        //p->proc_prio = 0;
        //p->proc_sched = 0;
        //p->spid = -1;
        //p->irq = 0;
        //p->task_flags = 0;
/*
        // Check whether we are probing a module
        if ((probed_mod = module_text_address((unsigned long) p->addr))) {
                struct module *calling_mod = module_text_address(called_from);
                // We must allow modules to probe themselves and
                // in this case avoid incrementing the module refcount,
                // so as to allow unloading of self probing modules.
                //
                if (calling_mod && (calling_mod != probed_mod)) {
                        if (unlikely(!try_module_get(probed_mod)))
                                return -EINVAL;
                        p->mod_refcounted = 1;
                } else
                        probed_mod = NULL;
        }
*/
        p->nmissed = 0;
//      mutex_lock(&kprobe_mutex);
        old_p = get_kprobe (p->addr, 0, NULL);
        if (old_p)
        {
                ret = register_aggr_kprobe (old_p, p);
                if (!ret)
                        atomic_inc (&kprobe_count);
                goto out;
        }

        if ((ret = arch_prepare_kprobe (p)) != 0)
                goto out;

        DBPRINTF ("before out ret = 0x%x\n", ret);

        INIT_HLIST_NODE (&p->hlist);
        hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);

/*      if (atomic_add_return(1, &kprobe_count) == \
                                (ARCH_INACTIVE_KPROBE_COUNT + 1))
                register_page_fault_notifier(&kprobe_page_fault_nb);*/

        arch_arm_kprobe (p);

      out:
//      mutex_unlock(&kprobe_mutex);
/*
        if (ret && probed_mod)
                module_put(probed_mod);
*/
        DBPRINTF ("out ret = 0x%x\n", ret);

        return ret;
}

static int __kprobes
__register_uprobe (struct kprobe *p, struct task_struct *task, int atomic, unsigned long called_from)
{
        int ret = 0;
        struct kprobe *old_p;

        if (!p->addr)
                return -EINVAL;

        DBPRINTF ("p->addr = 0x%p p = 0x%p\n", p->addr, p);

        p->mod_refcounted = 0;
        p->nmissed = 0;
#ifdef KPROBES_PROFILE
        p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
        p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
        p->count = 0;
#endif

        // get the first item
        old_p = get_kprobe (p->addr, p->tgid, NULL);
        if (old_p)
        {
                ret = register_aggr_kprobe (old_p, p);
                if (!ret)
                        atomic_inc (&kprobe_count);
                goto out;
        }
        if ((ret = arch_prepare_uprobe (p, task, atomic)) != 0)
        {
                goto out;
        }

        DBPRINTF ("before out ret = 0x%x\n", ret);

        INIT_HLIST_NODE (&p->hlist);
        hlist_add_head_rcu (&p->hlist, &kprobe_table[hash_ptr (p->addr, KPROBE_HASH_BITS)]);

        INIT_HLIST_NODE (&p->is_hlist);
        hlist_add_head_rcu (&p->is_hlist, &uprobe_insn_slot_table[hash_ptr (p->ainsn.insn, KPROBE_HASH_BITS)]);

        arch_arm_uprobe (p, task);
out:
        DBPRINTF ("out ret = 0x%x\n", ret);

        return ret;
}

void __kprobes
unregister_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
{
        unregister_kprobe (p, task, atomic);
}

int __kprobes
register_kprobe (struct kprobe *p, int atomic)
{
        return __register_kprobe (p, (unsigned long) __builtin_return_address (0), atomic);
}
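
/*
 * Usage sketch (illustrative only, not part of this module): registering a
 * kernel probe through this module's API, which differs from mainline
 * kprobes by the extra 'atomic' argument. All my_* names and the probed
 * symbol are placeholders.
 *
 *      static int my_pre (struct kprobe *kp, struct pt_regs *regs)
 *      {
 *              // inspect regs here; return 0 to resume normal execution
 *              return 0;
 *      }
 *
 *      static struct kprobe my_kp = {
 *              .symbol_name = "do_fork",
 *              .pre_handler = my_pre,
 *      };
 *
 *      // register_kprobe (&my_kp, 0);          probe armed
 *      // unregister_kprobe (&my_kp, 0, 0);     probe removed (task == 0
 *      //                                        for a kernel probe)
 */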

void __kprobes
unregister_kprobe (struct kprobe *p, struct task_struct *task, int atomic)
{
//      struct module *mod;
        struct kprobe *old_p, *list_p;
        int cleanup_p, pid = 0;

//      mutex_lock(&kprobe_mutex);

        pid = p->tgid;

        old_p = get_kprobe (p->addr, pid, NULL);
        DBPRINTF ("unregister_kprobe p=%p old_p=%p", p, old_p);
        if (unlikely (!old_p))
        {
//              mutex_unlock(&kprobe_mutex);
                return;
        }
        if (p != old_p)
        {
                list_for_each_entry_rcu (list_p, &old_p->list, list)
                        if (list_p == p)
                                /* kprobe p is a valid probe */
                                goto valid_p;
//              mutex_unlock(&kprobe_mutex);
                return;
        }
valid_p:
        DBPRINTF ("unregister_kprobe valid_p");
        if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
                (p->list.next == &old_p->list) && (p->list.prev == &old_p->list)))
        {
                /* Only probe on the hash list */
                DBPRINTF ("unregister_kprobe disarm pid=%d", pid);
                if (pid)
                        arch_disarm_uprobe (p, task);//vma, page, kaddr);
                else
                        arch_disarm_kprobe (p);
                hlist_del_rcu (&old_p->hlist);
                cleanup_p = 1;
        }
        else
        {
                list_del_rcu (&p->list);
                cleanup_p = 0;
        }
        DBPRINTF ("unregister_kprobe cleanup_p=%d", cleanup_p);
//      mutex_unlock(&kprobe_mutex);

//      synchronize_sched();
/*
        if (p->mod_refcounted &&
            (mod = module_text_address((unsigned long)p->addr)))
                module_put(mod);
*/
        if (cleanup_p)
        {
                if (p != old_p)
                {
                        list_del_rcu (&p->list);
                        kfree (old_p);
                }
                arch_remove_kprobe (p, task);
        }
        else
        {
///             mutex_lock(&kprobe_mutex);
                if (p->break_handler)
                        old_p->break_handler = NULL;
                if (p->post_handler)
                {
                        list_for_each_entry_rcu (list_p, &old_p->list, list)
                        {
                                if (list_p->post_handler)
                                {
                                        cleanup_p = 2;
                                        break;
                                }
                        }
                        if (cleanup_p == 0)
                                old_p->post_handler = NULL;
                }
//              mutex_unlock(&kprobe_mutex);
        }

        /* Call unregister_page_fault_notifier()
         * if no probes are active
         */
//      mutex_lock(&kprobe_mutex);
/*      if (atomic_add_return(-1, &kprobe_count) == \
                                ARCH_INACTIVE_KPROBE_COUNT)
                unregister_page_fault_notifier(&kprobe_page_fault_nb);*/
//      mutex_unlock(&kprobe_mutex);
        return;
}

int __kprobes
register_ujprobe (struct task_struct *task, struct mm_struct *mm, struct jprobe *jp, int atomic)
{
        int ret = 0;
#ifdef _DEBUG
        gSilent = 0;
#endif
        /* Todo: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        ret = __register_uprobe (&jp->kp, task, atomic,
                                    (unsigned long) __builtin_return_address (0));

#ifdef _DEBUG
        gSilent = 1;
#endif
        return ret;
}

void __kprobes
unregister_ujprobe (struct task_struct *task, struct jprobe *jp, int atomic)
{
        unregister_uprobe (&jp->kp, task, atomic);
}

int __kprobes
register_jprobe (struct jprobe *jp, int atomic)
{
        /* Todo: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        return __register_kprobe (&jp->kp, (unsigned long) __builtin_return_address (0), atomic);
}

void __kprobes
unregister_jprobe (struct jprobe *jp, int atomic)
{
        unregister_kprobe (&jp->kp, 0, atomic);
}
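
/*
 * Usage sketch (illustrative only): a jprobe supplies a handler with the
 * same signature as the probed function; setjmp_pre_handler redirects
 * execution to it and jprobe_return () switches back. Names are
 * placeholders, and the .entry field layout follows the mainline jprobe
 * convention assumed here.
 *
 *      static long my_jdo_fork (unsigned long clone_flags, unsigned long stack_start,
 *                               struct pt_regs *regs, unsigned long stack_size,
 *                               int *parent_tidptr, int *child_tidptr)
 *      {
 *              // arguments arrive exactly as the probed function sees them
 *              jprobe_return ();
 *              return 0;       // never reached
 *      }
 *
 *      static struct jprobe my_jp = {
 *              .entry = (kprobe_opcode_t *) my_jdo_fork,
 *              .kp.symbol_name = "do_fork",
 *      };
 *
 *      // register_jprobe (&my_jp, 0);
 */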

/*
 * This kprobe pre_handler is registered with every kretprobe. When a probe
 * hits it will set up the return probe.
 */
static int __kprobes
pre_handler_kretprobe (struct kprobe *p, struct pt_regs *regs   /*, struct vm_area_struct **vma,
                                                                   struct page **page, unsigned long **kaddr */ )
{
        struct kretprobe *rp = container_of (p, struct kretprobe, kp);
        unsigned long flags = 0;
        DBPRINTF ("START\n");

        /* TODO: consider swapping the RA only after the last pre_handler has fired */
        spin_lock_irqsave (&kretprobe_lock, flags);
        if (!rp->disarm)
                __arch_prepare_kretprobe (rp, regs);
        spin_unlock_irqrestore (&kretprobe_lock, flags);
        DBPRINTF ("END\n");
        return 0;
}

struct kretprobe *sched_rp;

int __kprobes
register_kretprobe (struct kretprobe *rp, int atomic)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        DBPRINTF ("START");

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        rp->disarm = 0;

        /* Pre-allocate memory for max kretprobe instances */
        if(rp->kp.addr == sched_addr)
                rp->maxactive = 1000;//max (100, 2 * NR_CPUS);
        else if (rp->maxactive <= 0)
        {
#if 1//def CONFIG_PREEMPT
                rp->maxactive = max (10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD (&rp->used_instances);
        INIT_HLIST_HEAD (&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++)
        {
                inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
                if (inst == NULL)
                {
                        free_rp_inst (rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE (&inst->uflist);
                hlist_add_head (&inst->uflist, &rp->free_instances);
        }

        DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
        rp->nmissed = 0;
        /* Establish function entry probe point */
        if ((ret = __register_kprobe (&rp->kp, (unsigned long) __builtin_return_address (0), atomic)) != 0)
                free_rp_inst (rp);

        DBPRINTF ("addr=%p, *addr=[%lx %lx %lx]", rp->kp.addr, (unsigned long) (*(rp->kp.addr)), (unsigned long) (*(rp->kp.addr + 1)), (unsigned long) (*(rp->kp.addr + 2)));
        if(rp->kp.addr == sched_addr)
                sched_rp = rp;

        return ret;
}
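
/*
 * Usage sketch (illustrative only): a return probe fires when the probed
 * function returns; maxactive bounds how many concurrent activations can be
 * tracked before further hits are only counted in nmissed. Names are
 * placeholders; the .handler field follows the mainline kretprobe
 * convention assumed here.
 *
 *      static int my_ret_handler (struct kretprobe_instance *ri, struct pt_regs *regs)
 *      {
 *              // regs hold the return-site state; ri->task is the returning task
 *              return 0;
 *      }
 *
 *      static struct kretprobe my_rp = {
 *              .kp.symbol_name = "do_fork",
 *              .handler = my_ret_handler,
 *              .maxactive = 20,
 *      };
 *
 *      // register_kretprobe (&my_rp, 0);
 */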

void __kprobes
unregister_kretprobe (struct kretprobe *rp, int atomic)
{
        unsigned long flags;
        struct kretprobe_instance *ri;

        //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
        //               *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
        unregister_kprobe (&rp->kp, 0, atomic);

        if(rp->kp.addr == sched_addr)
                sched_rp = NULL;

        //printk("addr=%p, *addr=[%lx %lx %lx]\n", rp->kp.addr,
        //               *(rp->kp.addr), *(rp->kp.addr+1), *(rp->kp.addr+2));
        /* No race here */
        spin_lock_irqsave (&kretprobe_lock, flags);
        while ((ri = get_used_rp_inst (rp)) != NULL)
        {
                ri->rp = NULL;
                hlist_del (&ri->uflist);
        }
        spin_unlock_irqrestore (&kretprobe_lock, flags);
        free_rp_inst (rp);
}

int __kprobes
register_uretprobe (struct task_struct *task, struct mm_struct *mm, struct kretprobe *rp, int atomic)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        /*struct page *pages[2] = {0, 0};
           struct vm_area_struct *vmas[2] = {0, 0};
           unsigned long *kaddrs[2] = {0, 0}; */
        int i;
#ifdef _DEBUG
        gSilent = 0;
#endif

        DBPRINTF ("START\n");

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        rp->disarm = 0;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0)
        {
#if 1//def CONFIG_PREEMPT
                rp->maxactive = max (10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD (&rp->used_instances);
        INIT_HLIST_HEAD (&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++)
        {
                inst = kmalloc (sizeof (struct kretprobe_instance), GFP_KERNEL);
                if (inst == NULL)
                {
                        free_rp_inst (rp);
                        ret = -ENOMEM;
                        goto out;
                }
                INIT_HLIST_NODE (&inst->uflist);
                hlist_add_head (&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;
#if 0
        ret = get_user_pages_uprobe (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);
        if (ret <= 0)
        {
                DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
                ret = -EFAULT;
                goto out;
        }
        if (atomic)
                kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        else
                kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        // if 2nd instruction is on the 2nd page
        if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
        {
                ret = get_user_pages_uprobe (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
                if (ret <= 0)
                {
                        DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
                        ret = -EFAULT;
                        goto out;
                }
                if (atomic)
                        kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
                else
                        kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
        }
        else
        {
                // 2nd instruction is on the 1st page too
                vmas[1] = vmas[0];
                pages[1] = pages[0];
                kaddrs[1] = kaddrs[0] + 1;
        }
#endif
        /* Establish function exit probe point */
        if ((ret = arch_prepare_uretprobe (rp, task/*vmas, pages, kaddrs */ )) != 0)
        {
                /* drop the pre-allocated instances on failure, as the
                 * allocation error path above does (plugs a leak) */
                free_rp_inst (rp);
                goto out;
        }
        /* Establish function entry probe point */
        if ((ret = __register_uprobe (&rp->kp, task, atomic,
                                         (unsigned long) __builtin_return_address (0))) != 0)
        {
                free_rp_inst (rp);
                goto out;
        }

        arch_arm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
#if 0
        if (atomic)
                set_page_dirty (pages[1]);
        else
                set_page_dirty_lock (pages[1]);
#endif
      out:
#if 0
        if (pages[0])
        {
                if (kaddrs[0])
                {
                        if (atomic)
                                kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
                        else
                                kunmap (pages[0]);
                }
                page_cache_release (pages[0]);
        }
        if ((pages[0] != pages[1]))
        {
                if (pages[1])
                {
                        if (kaddrs[1])
                        {
                                if (atomic)
                                        kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
                                else
                                        kunmap (pages[1]);
                        }
                        page_cache_release (pages[1]);
                }
        }
        /*else if( (pages[0] != pages[2]) ){
           if(pages[2]){
           if(kaddrs[2]) {
           if (atomic) kunmap_atomic(kaddrs[2], KM_USER1);
           else        kunmap(pages[2]);
           }
           page_cache_release(pages[2]);
           }
           } */
#endif

#ifdef _DEBUG
        gSilent = 1;
#endif
        return ret;
}
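
/*
 * Usage sketch (illustrative only): a uretprobe is a kretprobe applied to
 * a user space function; the caller resolves the target address in the
 * traced process first. user_fn_addr and my_ret_handler are hypothetical.
 *
 *      struct kretprobe urp = {
 *              .kp.addr = (kprobe_opcode_t *) user_fn_addr,
 *              .kp.tgid = task->tgid,
 *              .handler = my_ret_handler,
 *      };
 *      // register_uretprobe (task, task->mm, &urp, 0);
 *      // ... later: unregister_uretprobe (task, &urp, 0);
 */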

static struct kretprobe *__kprobes
clone_kretprobe (struct kretprobe *rp)
{
        struct kprobe *old_p;
        struct kretprobe *clone = NULL;
        int ret;

        clone = kmalloc (sizeof (struct kretprobe), GFP_KERNEL);
        if (!clone)
        {
                DBPRINTF ("failed to alloc memory for clone probe %p!", rp->kp.addr);
                return NULL;
        }
        memcpy (clone, rp, sizeof (struct kretprobe));
        clone->kp.pre_handler = pre_handler_kretprobe;
        clone->kp.post_handler = NULL;
        clone->kp.fault_handler = NULL;
        clone->kp.break_handler = NULL;
        old_p = get_kprobe (rp->kp.addr, rp->kp.tgid, NULL);
        if (old_p)
        {
                ret = register_aggr_kprobe (old_p, &clone->kp);
                if (ret)
                {
                        kfree (clone);
                        return NULL;
                }
                atomic_inc (&kprobe_count);
        }

        return clone;
}

void __kprobes
unregister_uretprobe (struct task_struct *task, struct kretprobe *rp, int atomic)
{
        //int ret = 0;
        unsigned long flags;
        struct kretprobe_instance *ri;
        struct kretprobe *rp2 = NULL;
        /*struct mm_struct *mm;
           struct page *pages[2] = {0, 0};
           struct vm_area_struct *vmas[2] = {0, 0};
           unsigned long *kaddrs[2] = {0, 0}; */

#ifdef _DEBUG
        gSilent = 0;
#endif
#if 0
        mm = atomic ? task->active_mm : get_task_mm (task);
        if (!mm)
        {
                DBPRINTF ("task %u has no mm!", task->pid);
#ifdef _DEBUG
                gSilent = 1;
#endif
                return;
        }
        down_read (&mm->mmap_sem);
        ret = get_user_pages_uprobe (task, mm, (unsigned long) rp->kp.addr, 1, 1, 1, &pages[0], &vmas[0]);

        if (ret <= 0)
        {
                DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr);
                goto out;
        }
        if (atomic)
                kaddrs[0] = kmap_atomic (pages[0], KM_USER0) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        else
                kaddrs[0] = kmap (pages[0]) + ((unsigned long) rp->kp.addr & ~PAGE_MASK);
        if ((((unsigned long) (rp->kp.addr + 1)) & ~PAGE_MASK) == 0)
        {
                ret = get_user_pages_uprobe (task, mm, (unsigned long) (rp->kp.addr + 1), 1, 1, 1, &pages[1], &vmas[1]);
                if (ret <= 0)
                {
                        DBPRINTF ("get_user_pages for %p failed!", rp->kp.addr + 1);
                        goto out;
                }
                if (atomic)
                        kaddrs[1] = kmap_atomic (pages[1], KM_USER1) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
                else
                        kaddrs[1] = kmap (pages[1]) + ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK);
        }
        else
        {
                vmas[1] = vmas[0];
                pages[1] = pages[0];
                kaddrs[1] = kaddrs[0] + 1;
        }

        /* No race here */
        DBPRINTF ("unregister_uretprobe1 addr %p [%lx %lx]", rp->kp.addr, *kaddrs[0], *kaddrs[1]);
#endif
        spin_lock_irqsave (&kretprobe_lock, flags);
        if (hlist_empty (&rp->used_instances))
        {
                // no used retprobe instances (i.e. the function has not been
                // entered), so the retprobe can be disarmed immediately
                arch_disarm_uretprobe (rp, task);//vmas[1], pages[1], kaddrs[1]);
#if 0
                if (atomic)
                        set_page_dirty (pages[1]);
                else
                        set_page_dirty_lock (pages[1]);
#endif
        }
        else
        {
                rp2 = clone_kretprobe (rp);
                if (!rp2)
                        DBPRINTF ("unregister_uretprobe addr %p: failed to clone retprobe!", rp->kp.addr);
                else
                {
                        DBPRINTF ("initiating deferred retprobe deletion addr %p", rp->kp.addr);
                        printk ("initiating deferred retprobe deletion addr %p\n", rp->kp.addr);
                        rp2->disarm = 1;
                }
        }

        while ((ri = get_used_rp_inst (rp)) != NULL)
        {
                ri->rp = NULL;
                ri->rp2 = rp2;
                hlist_del (&ri->uflist);
        }
        spin_unlock_irqrestore (&kretprobe_lock, flags);
        free_rp_inst (rp);

        unregister_uprobe (&rp->kp, task, atomic);
        //DBPRINTF("unregister_uretprobe3 addr %p [%lx %lx]",
        //              rp->kp.addr, *kaddrs[0], *kaddrs[1]);
#if 0
      out:
        if (pages[0])
        {
                if (kaddrs[0])
                {
                        if (atomic)
                                kunmap_atomic (kaddrs[0] - ((unsigned long) rp->kp.addr & ~PAGE_MASK), KM_USER0);
                        else
                                kunmap (pages[0]);
                }
                page_cache_release (pages[0]);
        }
        if (pages[1] && (pages[0] != pages[1]))
        {
                if (kaddrs[1])
                {
                        if (atomic)
                                kunmap_atomic (kaddrs[1] - ((unsigned long) (rp->kp.addr + 1) & ~PAGE_MASK), KM_USER1);
                        else
                                kunmap (pages[1]);
                }
                page_cache_release (pages[1]);
        }
        if (!atomic)
        {
                up_read (&mm->mmap_sem);
                mmput (mm);
        }
#endif
#ifdef _DEBUG
        gSilent = 1;
#endif
}

void __kprobes
unregister_all_uprobes (struct task_struct *task, int atomic)
{
        struct hlist_head *head;
        struct hlist_node *node, *tnode;
        struct kprobe *p;
        int i;

        for(i = 0; i < KPROBE_TABLE_SIZE; i++){
                head = &kprobe_table[i];
                hlist_for_each_entry_safe (p, node, tnode, head, hlist){
                        if(p->tgid == task->tgid){
                                printk("unregister_all_uprobes: delete uprobe at %pf for %s/%d\n", p->addr, task->comm, task->pid);
                                unregister_uprobe (p, task, atomic);
                        }
                }
        }
        purge_garbage_uslots(task, atomic);
}


#define GUP_FLAGS_WRITE                  0x1
#define GUP_FLAGS_FORCE                  0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
#define GUP_FLAGS_IGNORE_SIGKILL         0x8


static inline int use_zero_page(struct vm_area_struct *vma)
{
        /*
         * We don't want to optimize FOLL_ANON for make_pages_present()
         * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
         * we want to get the page from the page tables to make sure
         * that we serialize and update with any other user of that
         * mapping.
         */
        if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
                return 0;
        /*
         * And if we have a fault routine, it's not an anonymous region.
         */
        return !vma->vm_ops || !vma->vm_ops->fault;
}
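
/*
 * Note (explanatory): use_zero_page() only matters for the FOLL_ANON
 * optimization, which __get_user_pages_uprobe() below applies solely on
 * kernels up to 2.6.30; later kernels dropped FOLL_ANON, so the guarded
 * branch compiles out there.
 */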
1321
1322 int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
1323                      unsigned long start, int len, int flags,
1324                 struct page **pages, struct vm_area_struct **vmas)
1325 {
1326         int i;
1327         unsigned int vm_flags = 0;
1328         int write = !!(flags & GUP_FLAGS_WRITE);
1329         int force = !!(flags & GUP_FLAGS_FORCE);
1330         int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
1331         int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
1332
1333         if (len <= 0)
1334                 return 0;
1335         /* 
1336          * Require read or write permissions.
1337          * If 'force' is set, we only require the "MAY" flags.
1338          */
1339         vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1340         vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1341         i = 0;
1342
1343         do {
1344                 struct vm_area_struct *vma;
1345                 unsigned int foll_flags;
1346
1347                 //vma = find_extend_vma(mm, start);
1348                 vma = find_vma(mm, start);
1349                 if (!vma && in_gate_area(tsk, start)) {
1350                         unsigned long pg = start & PAGE_MASK;
1351                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
1352                         pgd_t *pgd;
1353                         pud_t *pud;
1354                         pmd_t *pmd;
1355                         pte_t *pte;
1356
1357                         /* user gate pages are read-only */
1358                         if (!ignore && write)
1359                                 return i ? : -EFAULT;
1360                         if (pg > TASK_SIZE)
1361                                 pgd = pgd_offset_k(pg);
1362                         else
1363                                 pgd = pgd_offset_gate(mm, pg);
1364                         BUG_ON(pgd_none(*pgd));
1365                         pud = pud_offset(pgd, pg);
1366                         BUG_ON(pud_none(*pud));
1367                         pmd = pmd_offset(pud, pg);
1368                         if (pmd_none(*pmd))
1369                                 return i ? : -EFAULT;
1370                         pte = pte_offset_map(pmd, pg);
1371                         if (pte_none(*pte)) {
1372                                 pte_unmap(pte);
1373                                 return i ? : -EFAULT;
1374                         }
1375                         if (pages) {
1376                                 struct page *page = vm_normal_page(gate_vma, start, *pte);
1377                                 pages[i] = page;
1378                                 if (page)
1379                                         get_page(page);
1380                         }
1381                         pte_unmap(pte);
1382                         if (vmas)
1383                                 vmas[i] = gate_vma;
1384                         i++;
1385                         start += PAGE_SIZE;
1386                         len--;
1387                         continue;
1388                 }
1389
1390                 if (!vma ||
1391                     (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1392                     (!ignore && !(vm_flags & vma->vm_flags)))
1393                         return i ? : -EFAULT;
1394
1395                 if (is_vm_hugetlb_page(vma)) {
1396                         i = follow_hugetlb_page(mm, vma, pages, vmas,
1397                                                 &start, &len, i, write);
1398                         continue;
1399                 }
1400
1401                 foll_flags = FOLL_TOUCH;
1402                 if (pages)
1403                         foll_flags |= FOLL_GET;
1404
1405 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
1406                 if (!write && use_zero_page(vma))
1407                   foll_flags |= FOLL_ANON;
1408 #endif
1409
1410                 do {
1411                         struct page *page;
1412
1413                         /*
1414                          * If we have a pending SIGKILL, don't keep faulting
1415                          * pages and potentially allocating memory, unless
1416                          * current is handling munlock--e.g., on exit. In
1417                          * that case, we are not allocating memory.  Rather,
1418                          * we're only unlocking already resident/mapped pages.
1419                          */
1420                         if (unlikely(!ignore_sigkill &&
1421                                         fatal_signal_pending(current)))
1422                                 return i ? i : -ERESTARTSYS;
1423
1424                         if (write)
1425                                 foll_flags |= FOLL_WRITE;
1426
1427                         
1428                         //cond_resched();
1429
                        DBPRINTF ("pages = %p vma = %p\n", pages, vma);
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
                                ret = handle_mm_fault(mm, vma, start,
                                                foll_flags & FOLL_WRITE);
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
                                        else if (ret & VM_FAULT_SIGBUS)
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
                                if (ret & VM_FAULT_MAJOR)
                                        tsk->maj_flt++;
                                else
                                        tsk->min_flt++;

                                /*
                                 * The VM_FAULT_WRITE bit tells us that
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
                                 * page lookups as if they were reads. But only
                                 * do so when looping for pte_write is futile:
                                 * in some cases userspace may also be wanting
                                 * to write to the gotten user page, which a
                                 * read fault here might prevent (a readonly
                                 * page might get reCOWed by userspace write).
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                    !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;

                                //cond_resched();
                        }
                        if (IS_ERR(page))
                                return i ? i : PTR_ERR(page);
                        if (pages) {
                                pages[i] = page;

                                flush_anon_page(vma, page, start);
                                flush_dcache_page(page);
                        }
                        if (vmas)
                                vmas[i] = vma;
                        i++;
                        start += PAGE_SIZE;
                        len--;
                } while (len && start < vma->vm_end);
        } while (len);
        return i;
}

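/*
 * Note: __get_user_pages_uprobe() above closely follows the kernel's
 * __get_user_pages(), but with the cond_resched() calls disabled so the
 * page walk never sleeps once entered from probe context.
 */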
int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int len, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
{
        int flags = 0;

        if (write)
                flags |= GUP_FLAGS_WRITE;
        if (force)
                flags |= GUP_FLAGS_FORCE;

        return __get_user_pages_uprobe(tsk, mm,
                                start, len, flags,
                                pages, vmas);
}

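/*
 * Illustrative use only (addr is a hypothetical user address): pin one
 * page of the current task for reading, mirroring get_user_pages()
 * semantics; the reference taken via FOLL_GET must be dropped afterwards.
 *
 *      struct page *page;
 *      struct vm_area_struct *vma;
 *      int ret;
 *
 *      down_read(&current->mm->mmap_sem);
 *      ret = get_user_pages_uprobe(current, current->mm, addr & PAGE_MASK,
 *                                  1, 0, 0, &page, &vma);
 *      up_read(&current->mm->mmap_sem);
 *      if (ret == 1)
 *              page_cache_release(page);
 */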
int
access_process_vm_atomic (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        void *old_buf = buf;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
                void *maddr;
                struct page *page = NULL;

                ret = get_user_pages_uprobe(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
                if (ret <= 0) {
                        /*
                         * Check if this is a VM_IO | VM_PFNMAP VMA, which
                         * we can access using slightly different code.
                         */
#ifdef CONFIG_HAVE_IOREMAP_PROT
                        vma = find_vma(mm, addr);
                        if (!vma)
                                break;
                        if (vma->vm_ops && vma->vm_ops->access)
                                ret = vma->vm_ops->access(vma, addr, buf,
                                                          len, write);
                        if (ret <= 0)
#endif
                                break;
                        bytes = ret;
                } else {
                        bytes = len;
                        offset = addr & (PAGE_SIZE-1);
                        if (bytes > PAGE_SIZE-offset)
                                bytes = PAGE_SIZE-offset;

                        maddr = kmap(page);
                        if (write) {
                                copy_to_user_page(vma, page, addr,
                                                  maddr + offset, buf, bytes);
                                set_page_dirty_lock(page);
                        } else {
                                copy_from_user_page(vma, page, addr,
                                                    buf, maddr + offset, bytes);
                        }
                        kunmap(page);
                        page_cache_release(page);
                }
                len -= bytes;
                buf += bytes;
                addr += bytes;
        }
        up_read(&mm->mmap_sem);
        mmput(mm);

        return buf - old_buf;
}

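/*
 * Illustrative use only (task and addr are hypothetical): copy a word out
 * of another task's address space, e.g. from a probe handler; the return
 * value is the number of bytes actually transferred.
 *
 *      unsigned long word;
 *      int copied;
 *
 *      copied = access_process_vm_atomic(task, addr, &word, sizeof(word), 0);
 *      if (copied != sizeof(word))
 *              ;       // partial or failed copy
 */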
#ifdef CONFIG_DEBUG_FS
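/*
 * kallsyms_lookup() is not exported to modules on the kernels this code
 * targets, so the real implementation is resolved at runtime through
 * kallsyms_search() in init_kprobes() and trampolined here.
 */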
const char *(*__real_kallsyms_lookup) (unsigned long addr, unsigned long *symbolsize,
                                       unsigned long *offset, char **modname, char *namebuf);
const char *
kallsyms_lookup (unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf)
{
        return __real_kallsyms_lookup (addr, symbolsize, offset, modname, namebuf);
}

static void __kprobes
report_probe (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
{
        const char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe) {
                if (p->tgid)
                        kprobe_type = "ur";
                else
                        kprobe_type = "r";
        } else if (p->pre_handler == setjmp_pre_handler) {
                if (p->tgid)
                        kprobe_type = "uj";
                else
                        kprobe_type = "j";
        } else if (p->tgid) {
                kprobe_type = "u";
        } else {
                kprobe_type = "k";
        }

        if (sym)
                seq_printf (pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type, sym, offset, (modname ? modname : " "));
        else
                seq_printf (pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

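/*
 * Sample "list" output (illustrative values): address, probe type
 * (k/j/r for kernel probes, u/uj/ur for their user-space counterparts),
 * then symbol+offset and module name when the symbol is known:
 *
 *      c0158fa4  k  do_fork+0x0
 *      bf0001a0  ur  handle_event+0x10  my_module
 */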
/* seq_file iterator: *pos walks the kprobe_table bucket indices, one hash
 * bucket per show() call, from 0 to KPROBE_TABLE_SIZE - 1. */
static void __kprobes *
kprobe_seq_start (struct seq_file *f, loff_t * pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *
kprobe_seq_next (struct seq_file *f, void *v, loff_t * pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes
kprobe_seq_stop (struct seq_file *f, void *v)
{
        /* Nothing to do */
}

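/*
 * Per-address bookkeeping for a user-space probe point; the commented-out
 * branch in show_kprobe_addr() below recovers it from an embedded jprobe
 * or kretprobe via container_of() to print the probe's symbolic name.
 */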
struct us_proc_ip
{
        char *name;
        int installed;
        struct jprobe jprobe;
        struct kretprobe retprobe;
        unsigned long offset;
};

static int __kprobes
show_kprobe_addr (struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long size, offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable ();
        hlist_for_each_entry_rcu (p, node, head, hlist)
        {
                /*if(p->pid){
                   struct us_proc_ip *up = NULL;
                   if (p->pre_handler == pre_handler_kretprobe){
                   struct kretprobe *rp = container_of(p, struct kretprobe, kp);
                   up = container_of(rp, struct us_proc_ip, retprobe);
                   }
                   else {//if (p->pre_handler == setjmp_pre_handler){
                   struct jprobe *jp = container_of(p, struct jprobe, kp);
                   up = container_of(jp, struct us_proc_ip, jprobe);
                   }
                   if(up){
                   sym = up->name;
                   printk("show_kprobe_addr: %s\n", sym);
                   }
                   }
                   else */
                sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler)
                {
                        list_for_each_entry_rcu (kp, &p->list, list)
                                report_probe (pi, kp, sym, offset, modname);
                }
                else
                        report_probe (pi, p, sym, offset, modname);
        }
        //seq_printf (pi, "handled exceptions %lu\n", handled_exceptions);
        preempt_enable ();
        return 0;
}

static struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next = kprobe_seq_next,
        .stop = kprobe_seq_stop,
        .show = show_kprobe_addr
};

static int __kprobes
kprobes_open (struct inode *inode, struct file *filp)
{
        return seq_open (filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
        .open = kprobes_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

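/*
 * These operations back the read-only "list" file created below in
 * debugfs_kprobe_init(), i.e. <debugfs>/kprobes/list (typically mounted
 * at /sys/kernel/debug).
 */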
#ifdef KPROBES_PROFILE
extern unsigned long nCount;
extern struct timeval probe_enter_diff_sum;
static void __kprobes *
kprobe_prof_seq_start (struct seq_file *f, loff_t * pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *
kprobe_prof_seq_next (struct seq_file *f, void *v, loff_t * pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes
kprobe_prof_seq_stop (struct seq_file *f, void *v)
{
}

static void __kprobes
report_probe_prof (struct seq_file *pi, struct kprobe *p, const char *sym, int offset, char *modname)
{
        const char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe) {
                if (p->pid)
                        kprobe_type = "ur";
                else
                        kprobe_type = "r";
        } else if (p->pre_handler == setjmp_pre_handler) {
                if (p->pid)
                        kprobe_type = "uj";
                else
                        kprobe_type = "j";
        } else if (p->pid) {
                kprobe_type = "u";
        } else {
                kprobe_type = "k";
        }

        if (sym)
                seq_printf (pi, "%p  %s  %s+0x%x  %s %lu.%06ld\n", p->addr, kprobe_type,
                            sym, offset, (modname ? modname : " "), p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
        else
                seq_printf (pi, "%p  %s  %p %lu.%06ld\n", p->addr, kprobe_type, p->addr, p->count ? p->hnd_tm_sum.tv_sec / p->count : 0, p->count ? p->hnd_tm_sum.tv_usec / p->count : 0);
}

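/*
 * The trailing figure above is the mean handler time per hit, printed as
 * sec.usec: hnd_tm_sum accumulates handler runtime, count the number of
 * hits, and each timeval field is divided by count independently. (That
 * per-field division is only an approximation of the true average when
 * tv_usec would carry into tv_sec.)
 */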
static int __kprobes
show_kprobe_prof (struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;       //, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long size, offset = 0;
        char *modname, namebuf[128];
        /* utv/ktv and ucount/kcount accumulate across the whole table walk
         * (one show() call per bucket) and are reset after the last bucket. */
        static struct timeval utv, ktv;
        static unsigned long ucount, kcount;

        head = &kprobe_table[i];
        preempt_disable ();
        hlist_for_each_entry_rcu (p, node, head, hlist)
        {
                sym = kallsyms_lookup ((unsigned long) p->addr, &size, &offset, &modname, namebuf);
                /*if (p->pre_handler == aggr_pre_handler) {
                   list_for_each_entry_rcu(kp, &p->list, list)
                   report_probe_prof(pi, kp, sym, offset, modname);
                   } else */
                report_probe_prof (pi, p, sym, offset, modname);
                if (p->count)
                {
                        if (p->pid)
                        {
                                set_normalized_timeval (&utv, utv.tv_sec + p->hnd_tm_sum.tv_sec, utv.tv_usec + p->hnd_tm_sum.tv_usec);
                                ucount += p->count;
                        }
                        else
                        {
                                //seq_printf(pi, "kernel probe handling %lu %lu.%06ld\n",
                                //              p->count, p->hnd_tm_sum.tv_sec, p->hnd_tm_sum.tv_usec);
                                //seq_printf(pi, "kernel probe handling2 %lu %lu.%06ld\n",
                                //              kcount, ktv.tv_sec, ktv.tv_usec);
                                set_normalized_timeval (&ktv, ktv.tv_sec + p->hnd_tm_sum.tv_sec, ktv.tv_usec + p->hnd_tm_sum.tv_usec);
                                kcount += p->count;
                                //seq_printf(pi, "kernel probe handling3 %lu %lu.%06ld\n",
                                //              kcount, ktv.tv_sec, ktv.tv_usec);
                        }
                }
        }
        if (i == (KPROBE_TABLE_SIZE - 1))
        {
                seq_printf (pi, "Average kernel probe handling %lu.%06ld\n", kcount ? ktv.tv_sec / kcount : 0, kcount ? ktv.tv_usec / kcount : 0);
                seq_printf (pi, "Average user probe handling %lu.%06ld\n", ucount ? utv.tv_sec / ucount : 0, ucount ? utv.tv_usec / ucount : 0);
                seq_printf (pi, "Average probe period %lu.%06ld\n", nCount ? probe_enter_diff_sum.tv_sec / nCount : 0, nCount ? probe_enter_diff_sum.tv_usec / nCount : 0);
                utv.tv_sec = utv.tv_usec = ktv.tv_sec = ktv.tv_usec = 0;
                ucount = kcount = 0;
        }
        preempt_enable ();
        return 0;
}

static struct seq_operations kprobes_prof_seq_ops = {
        .start = kprobe_prof_seq_start,
        .next = kprobe_prof_seq_next,
        .stop = kprobe_prof_seq_stop,
        .show = show_kprobe_prof
};

static int __kprobes
kprobes_prof_open (struct inode *inode, struct file *filp)
{
        return seq_open (filp, &kprobes_prof_seq_ops);
}

static struct file_operations debugfs_kprobes_prof_operations = {
        .open = kprobes_prof_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
#endif

int __kprobes debugfs_kprobe_init (void);
static struct dentry *dbg_dir, *dbg_file;
#ifdef KPROBES_PROFILE
static struct dentry *dbg_file_prof;
#endif

int __kprobes
debugfs_kprobe_init (void)
{
        dbg_dir = debugfs_create_dir ("kprobes", NULL);
        if (!dbg_dir)
                return -ENOMEM;

        dbg_file = debugfs_create_file ("list", 0444, dbg_dir, NULL, &debugfs_kprobes_operations);
        if (!dbg_file)
        {
                debugfs_remove (dbg_dir);
                dbg_dir = NULL;
                return -ENOMEM;
        }

#ifdef KPROBES_PROFILE
        dbg_file_prof = debugfs_create_file ("prof", 0444, dbg_dir, NULL, &debugfs_kprobes_prof_operations);
        if (!dbg_file_prof)
        {
                debugfs_remove (dbg_file);
                dbg_file = NULL;
                debugfs_remove (dbg_dir);
                dbg_dir = NULL;
                return -ENOMEM;
        }
#endif
        return 0;
}

//late_initcall(debugfs_kprobe_init);
extern unsigned long (*kallsyms_search) (const char *name);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_X86)
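/*
 * INT_MAX priority puts this notifier first in the die chain, so kprobe
 * breakpoint faults are examined before any other die handler runs.
 */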
static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = INT_MAX
};
#endif

static int __init
init_kprobes (void)
{
        int i, err = 0;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++)
        {
                INIT_HLIST_HEAD (&kprobe_table[i]);
                INIT_HLIST_HEAD (&kretprobe_inst_table[i]);
                INIT_HLIST_HEAD (&uprobe_insn_slot_table[i]);
        }
        atomic_set (&kprobe_count, 0);

        err = arch_init_kprobes ();

        DBPRINTF ("init_kprobes: arch_init_kprobes - %d", err);
#if defined(CONFIG_X86)
        if (!err)
                err = register_die_notifier (&kprobe_exceptions_nb);
        DBPRINTF ("init_kprobes: register_die_notifier - %d", err);
#endif // CONFIG_X86

#ifdef CONFIG_DEBUG_FS
        if (!err)
        {
                __real_kallsyms_lookup = (void *) kallsyms_search ("kallsyms_lookup");
                if (!__real_kallsyms_lookup)
                {
                        DBPRINTF ("kallsyms_lookup was not found! Oops. Where is the kernel?");
                        return -ESRCH;
                }
                err = debugfs_kprobe_init ();
                DBPRINTF ("init_kprobes: debugfs_kprobe_init - %d", err);
        }
#endif /* CONFIG_DEBUG_FS */

        return err;
}

static void __exit
exit_kprobes (void)
{
#ifdef CONFIG_DEBUG_FS
#ifdef KPROBES_PROFILE
        if (dbg_file_prof)
                debugfs_remove (dbg_file_prof);
#endif
        if (dbg_file)
                debugfs_remove (dbg_file);
        if (dbg_dir)
                debugfs_remove (dbg_dir);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_X86)
        unregister_die_notifier (&kprobe_exceptions_nb);
#endif // CONFIG_X86
        arch_exit_kprobes ();
}

module_init (init_kprobes);
module_exit (exit_kprobes);

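/*
 * The exports below are this module's public probe API. A minimal,
 * illustrative client sketch (hypothetical names; assumes the mainline
 * signature int register_kprobe(struct kprobe *) and the p->addr /
 * p->pre_handler fields used elsewhere in this file):
 *
 *      static int my_pre (struct kprobe *p, struct pt_regs *regs)
 *      {
 *              return 0;       // let the probed instruction execute
 *      }
 *
 *      static struct kprobe my_kp;
 *
 *      my_kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name ("do_fork");
 *      my_kp.pre_handler = my_pre;
 *      err = register_kprobe (&my_kp);
 *      // on module unload: unregister_kprobe (&my_kp);
 */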
EXPORT_SYMBOL_GPL (register_kprobe);
EXPORT_SYMBOL_GPL (unregister_kprobe);
EXPORT_SYMBOL_GPL (register_jprobe);
EXPORT_SYMBOL_GPL (unregister_jprobe);
EXPORT_SYMBOL_GPL (register_ujprobe);
EXPORT_SYMBOL_GPL (unregister_ujprobe);
EXPORT_SYMBOL_GPL (jprobe_return);
EXPORT_SYMBOL_GPL (uprobe_return);
EXPORT_SYMBOL_GPL (register_kretprobe);
EXPORT_SYMBOL_GPL (unregister_kretprobe);
EXPORT_SYMBOL_GPL (register_uretprobe);
EXPORT_SYMBOL_GPL (unregister_uretprobe);
EXPORT_SYMBOL_GPL (unregister_all_uprobes);
EXPORT_SYMBOL_GPL (access_process_vm_atomic);
#if LINUX_VERSION_CODE != KERNEL_VERSION(2,6,23)
EXPORT_SYMBOL_GPL (access_process_vm);
#endif
#ifdef KERNEL_HAS_ISPAGEPRESENT
EXPORT_SYMBOL_GPL (is_page_present);
#else
EXPORT_SYMBOL_GPL (page_present);
#endif