Merge branch 'dev' into kernel
author Vyacheslav Cherkashin <v.cherkashin@samsung.com>
Fri, 12 Apr 2013 07:49:41 +0000 (11:49 +0400)
committer Vyacheslav Cherkashin <v.cherkashin@samsung.com>
Fri, 12 Apr 2013 08:01:02 +0000 (12:01 +0400)
Conflicts:
src/modules/driver/probes_manager.c
src/modules/kprobe/arch/asm-arm/dbi_kprobes.c
src/modules/kprobe/dbi_insn_slots.c
src/modules/kprobe/dbi_kprobes.c
src/modules/kprobe/dbi_uprobes.c

driver/probes_manager.c
driver/sspt/sspt_debug.h
driver/sspt/sspt_file.c
driver/storage.c
driver/us_proc_inst.c
kprobe/arch/asm-arm/dbi_kprobes.c
kprobe/dbi_insn_slots.c
kprobe/dbi_kprobes.c
kprobe/dbi_kprobes_deps.c
kprobe/dbi_kprobes_deps.h
uprobe/swap_uprobes.c
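
Most of this merge adapts the module to the Linux 3.9 hlist iterator rework
(upstream commit b67bfe0d42ca), which dropped the separate struct hlist_node
cursor from the hlist_for_each_entry*() macros. A minimal sketch of the same
bucket walk under both iterator generations; my_probe and find_probe_example
are illustrative names, not part of this patch:

#include <linux/list.h>
#include <linux/version.h>

struct my_probe {
	unsigned long addr;
	struct hlist_node hlist;
};

static struct my_probe *find_probe_example(struct hlist_head *bucket,
					   unsigned long addr)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	struct my_probe *p;

	/* >= 3.9: the node cursor lives inside the macro */
	hlist_for_each_entry(p, bucket, hlist)
		if (p->addr == addr)
			return p;
#else
	struct my_probe *p;
	struct hlist_node *node;

	/* < 3.9: an explicit struct hlist_node cursor is required */
	hlist_for_each_entry(p, node, bucket, hlist)
		if (p->addr == addr)
			return p;
#endif
	return NULL;
}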

driver/probes_manager.c
index 41c2492..1ab4db4 100644
@@ -17,6 +17,7 @@
 
 #include <linux/percpu.h>
 #include <ksyms.h>
+#include <dbi_kprobes_deps.h>
 #include "module.h"
 #include "probes_manager.h"
 
@@ -84,7 +85,7 @@ attach_selected_probes (void)
        kernel_probe_t *p;
        struct hlist_node *node;
 
-       hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+       swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
        {
                partial_result = register_kernel_probe (p);
                if (partial_result)
@@ -104,9 +105,9 @@ detach_selected_probes (void)
        kernel_probe_t *p;
        struct hlist_node *node;
 
-       hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+       swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
                unregister_kernel_probe (p);
-       hlist_for_each_entry_rcu (p, node, &otg_kernel_probes, hlist) {
+       swap_hlist_for_each_entry_rcu (p, node, &otg_kernel_probes, hlist) {
                unregister_kernel_probe(p);
        }
 
@@ -136,12 +137,12 @@ int reset_probes(void)
        struct hlist_node *node, *tnode;
        kernel_probe_t *p;
 
-       hlist_for_each_entry_safe (p, node, tnode, &kernel_probes, hlist) {
+       swap_hlist_for_each_entry_safe (p, node, tnode, &kernel_probes, hlist) {
                hlist_del(node);
                kfree(p);
        }
 
-       hlist_for_each_entry_safe (p, node, tnode, &otg_kernel_probes, hlist) {
+       swap_hlist_for_each_entry_safe (p, node, tnode, &otg_kernel_probes, hlist) {
                hlist_del(node);
                kfree(p);
        }
driver/sspt/sspt_debug.h
index 1238e08..2b6c982 100644
@@ -77,7 +77,7 @@ static inline void print_file_probes(const struct sspt_file *file)
 
        for (i = 0; i < table_size; ++i) {
                head = &file->page_probes_table[i];
-               hlist_for_each_entry_rcu(page, node, head, hlist) {
+               swap_hlist_for_each_entry_rcu(page, node, head, hlist) {
                        print_page_probes(page);
                }
        }
driver/sspt/sspt_file.c
index 5e408ab..c8f72d9 100644
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <dbi_kprobes_deps.h>
 
 static int calculation_hash_bits(int cnt)
 {
@@ -72,7 +73,7 @@ void sspt_file_free(struct sspt_file *file)
 
        for (i = 0; i < table_size; ++i) {
                head = &file->page_probes_table[i];
-               hlist_for_each_entry_safe(page, p, n, head, hlist) {
+               swap_hlist_for_each_entry_safe(page, p, n, head, hlist) {
                        hlist_del(&page->hlist);
                        sspt_page_free(page);
                }
@@ -125,7 +126,7 @@ struct sspt_file *sspt_file_copy(const struct sspt_file *file)
                // copy pages
                for (i = 0; i < table_size; ++i) {
                        head = &file->page_probes_table[i];
-                       hlist_for_each_entry(page, node, head, hlist) {
+                       swap_hlist_for_each_entry(page, node, head, hlist) {
                                sspt_add_page(file_out, sspt_page_copy(page));
                        }
                }
@@ -141,7 +142,7 @@ static struct sspt_page *sspt_find_page(struct sspt_file *file, unsigned long of
        struct sspt_page *page;
 
        head = &file->page_probes_table[hash_ptr((void *)offset, file->page_probes_hash_bits)];
-       hlist_for_each_entry(page, node, head, hlist) {
+       swap_hlist_for_each_entry(page, node, head, hlist) {
                if (page->offset == offset) {
                        return page;
                }
driver/storage.c
index 50adf63..b3f9e9e 100644
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/time.h>
 #include <ksyms.h>
+#include <dbi_kprobes_deps.h>
 #include "module.h"
 #include "storage.h"
 #include "handlers_core.h"
@@ -193,7 +194,7 @@ int dbi_unregister_handlers_module(struct dbi_modules_handlers_info *dbi_mhi)
        list_del_rcu(&dbi_mhi->dbi_list_head);
        // This code block is kept for possible future use: removal is not yet
        // implemented for the unsafe state (i.e. between attach and stop)
-       /*hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
+       /*swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
                // XXX: no handling for pre_handlers because we assume they are not used
                if ((p->jprobe.entry != ((kprobe_pre_entry_handler_t )def_jprobe_event_pre_handler)) ||
                                (p->retprobe.handler != ((kretprobe_handler_t )def_retprobe_event_handler))) {
@@ -1253,7 +1254,7 @@ kernel_probe_t* find_probe (unsigned long addr)
        struct hlist_node *node;
 
        // check whether such a probe already exists
-       hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+       swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
                if (p->addr == addr)
                        break;
 
driver/us_proc_inst.c
index 0969457..75b5e0d 100644
@@ -647,7 +647,7 @@ static int register_us_page_probe(struct sspt_page *page,
                struct task_struct *task)
 {
        int err = 0;
-       struct us_ip *ip;
+       struct us_ip *ip, *n;
 
        spin_lock(&page->lock);
 
@@ -661,20 +661,21 @@ static int register_us_page_probe(struct sspt_page *page,
        sspt_page_assert_install(page);
        sspt_set_all_ip_addr(page, file);
 
-       list_for_each_entry(ip, &page->ip_list, list) {
+       list_for_each_entry_safe(ip, n, &page->ip_list, list) {
                err = register_usprobe_my(task, ip);
-               if (err != 0) {
-                       //TODO: ERROR
-                       goto unlock;
+               if (err == -ENOEXEC) {
+                       list_del(&ip->list);
+                       free_ip(ip);
+                       continue;
+               } else if (err) {
+                       EPRINTF("Failed to install probe");
                }
        }
-
-       sspt_page_installed(page);
-
 unlock:
+       sspt_page_installed(page);
        spin_unlock(&page->lock);
 
-       return err;
+       return 0;
 }
 
 static int unregister_us_page_probe(struct task_struct *task,
@@ -743,7 +744,7 @@ static void install_file_probes(struct task_struct *task, struct mm_struct *mm,
 
        for (i = 0; i < table_size; ++i) {
                head = &file->page_probes_table[i];
-               hlist_for_each_entry_rcu(page, node, head, hlist) {
+               swap_hlist_for_each_entry_rcu(page, node, head, hlist) {
                        register_us_page_probe(page, file, task);
                }
        }
@@ -785,7 +786,7 @@ static int check_install_pages_in_file(struct task_struct *task, struct sspt_fil
 
        for (i = 0; i < table_size; ++i) {
                head = &file->page_probes_table[i];
-               hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
+               swap_hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
                        if (page->install) {
                                return 1;
                        }
@@ -805,7 +806,7 @@ static int unregister_us_file_probes(struct task_struct *task, struct sspt_file
 
        for (i = 0; i < table_size; ++i) {
                head = &file->page_probes_table[i];
-               hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
+               swap_hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
                        err = unregister_us_page_probe(task, page, flag);
                        if (err != 0) {
                                // TODO: ERROR
kprobe/arch/asm-arm/dbi_kprobes.c
index eb994ad..22c982e 100644
@@ -438,7 +438,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;
kprobe/dbi_insn_slots.c
index ce521d9..7a02e4b 100644
@@ -50,6 +50,7 @@
 #include <linux/rculist.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <dbi_kprobes_deps.h>
 
 struct chunk {
        unsigned long *data;
@@ -169,7 +170,7 @@ void *alloc_insn_slot(struct slot_manager *sm)
        struct fixed_alloc *fa;
        struct hlist_node *pos;
 
-       hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
+       swap_hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
                free_slot = chunk_allocate(&fa->chunk, sm->slot_size);
                if (free_slot)
                        return free_slot;
@@ -191,7 +192,7 @@ void free_insn_slot(struct slot_manager *sm, void *slot)
        struct fixed_alloc *fa;
        struct hlist_node *pos;
 
-       hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
+       swap_hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
                if (!chunk_check_ptr(&fa->chunk, slot, PAGE_SIZE))
                        continue;
 
kprobe/dbi_kprobes.c
index c737575..6edfe3c 100644
@@ -165,7 +165,7 @@ struct kprobe *get_kprobe(void *addr)
        struct kprobe *p;
 
        head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, node, head, hlist) {
+       swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr) {
                        return p;
                }
@@ -261,12 +261,12 @@ struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
        struct hlist_node *node;
        struct kretprobe_instance *ri;
 
-       hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+       swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }
 
        if (!alloc_nodes_kretprobe(rp)) {
-               hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+               swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                        return ri;
                }
        }
@@ -281,7 +281,7 @@ struct kretprobe_instance *get_free_rp_inst_no_alloc(struct kretprobe *rp)
        struct hlist_node *node;
        struct kretprobe_instance *ri;
 
-       hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+       swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }
 
@@ -294,7 +294,7 @@ struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
        struct hlist_node *node;
        struct kretprobe_instance *ri;
 
-       hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
+       swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
                return ri;
        }
 
kprobe/dbi_kprobes_deps.c
index d45ae35..18f8cad 100644
@@ -75,8 +75,11 @@ static inline void dbi_kunmap_atomic(void *kvaddr)
 }
 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36) */
 
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate);
+DECLARE_MOD_DEP_WRAPPER(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate)
+IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len, prot, flags, pgoff, populate)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
 DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff);
 DECLARE_MOD_DEP_WRAPPER(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff)
 IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len, prot, flags, pgoff)
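
For reference, kernel 3.9 added an out-parameter (populate) to do_mmap_pgoff().
A minimal sketch of a version-agnostic call site, assuming do_mmap_pgoff has
been resolved through the module dependency machinery above; swap_do_mmap is an
illustrative name, not part of this patch:

#include <linux/version.h>
#include <linux/mm.h>

/* Illustrative helper, not from this patch: hides the 3.9 signature change
 * behind a single call site. */
static unsigned long swap_do_mmap(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long prot,
				  unsigned long flags, unsigned long pgoff)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	unsigned long populate;

	/* >= 3.9: the callee reports how many pages the caller should populate */
	return do_mmap_pgoff(file, addr, len, prot, flags, pgoff, &populate);
#else
	return do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
#endif
}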
@@ -129,10 +132,28 @@ static DECLARE_MOD_FUNC_DEP(in_gate_area_no_mm, int, unsigned long addr);
 static DECLARE_MOD_FUNC_DEP(in_gate_area_no_task, int, unsigned long addr);
 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
 
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+static DECLARE_MOD_FUNC_DEP(follow_page_mask, \
+               struct page *, struct vm_area_struct * vma, \
+               unsigned long address, unsigned int foll_flags, \
+               unsigned int *page_mask);
+DECLARE_MOD_DEP_WRAPPER(follow_page_mask, struct page *, \
+                               struct vm_area_struct * vma, \
+                               unsigned long address, \
+                               unsigned int foll_flags, \
+                               unsigned int *page_mask)
+IMP_MOD_DEP_WRAPPER (follow_page_mask, vma, address, foll_flags, page_mask)
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
 static DECLARE_MOD_FUNC_DEP(follow_page, \
                struct page *, struct vm_area_struct * vma, \
                unsigned long address, unsigned int foll_flags);
+DECLARE_MOD_DEP_WRAPPER(follow_page, struct page *, \
+                               struct vm_area_struct * vma, \
+                               unsigned long address, \
+                               unsigned int foll_flags)
+IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
 static DECLARE_MOD_FUNC_DEP(__flush_anon_page, \
                void, struct vm_area_struct *vma, struct page *page, \
                unsigned long vmaddr);
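
Similarly, on newer kernels (>= 3.8 in this tree) only follow_page_mask() is
available as a resolvable symbol, with follow_page() reduced to a wrapper over
it. The diff below adds follow_page_uprobe() for exactly this; a condensed
sketch of such a shim, where swap_follow_page is an illustrative name:

#include <linux/version.h>
#include <linux/mm.h>

/* Condensed from the follow_page_uprobe() shim added later in this file:
 * resolve follow_page_mask() on >= 3.8 and discard the page_mask output. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
static inline struct page *swap_follow_page(struct vm_area_struct *vma,
					    unsigned long address,
					    unsigned int foll_flags)
{
	unsigned int unused_page_mask;

	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}
#else
#define swap_follow_page(vma, address, foll_flags) \
	follow_page(vma, address, foll_flags)
#endif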
@@ -217,13 +238,6 @@ static inline int dbi_in_gate_area_no_xxx(unsigned long addr)
 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
 }
 
-
-#if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 11))
-DECLARE_MOD_DEP_WRAPPER (follow_page, \
-                       struct page *, struct vm_area_struct * vma, \
-                       unsigned long address, unsigned int foll_flags)
-IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
-#endif
 DECLARE_MOD_DEP_WRAPPER (__flush_anon_page, \
                        void, struct vm_area_struct *vma, \
                        struct page *page, unsigned long vmaddr)
@@ -243,6 +257,7 @@ IMP_MOD_DEP_WRAPPER(flush_ptrace_access, vma, page, uaddr, kaddr, len, write)
 
 int init_module_dependencies(void)
 {
+
 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
        INIT_MOD_DEP_VAR(handle_mm_fault, handle_mm_fault);
 #endif
@@ -263,7 +278,13 @@ int init_module_dependencies(void)
        INIT_MOD_DEP_VAR(in_gate_area, in_gate_area);
 #endif
 
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+       INIT_MOD_DEP_VAR(follow_page_mask, follow_page_mask);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+       INIT_MOD_DEP_VAR(follow_page, follow_page);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
 
 #ifndef is_zero_pfn
        swap_zero_pfn = page_to_pfn(ZERO_PAGE(0));
@@ -274,8 +295,6 @@ int init_module_dependencies(void)
        INIT_MOD_DEP_VAR(in_gate_area_no_task, in_gate_area_no_task);
 #endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))  */
 
-       INIT_MOD_DEP_VAR(follow_page, follow_page);
-
        INIT_MOD_DEP_VAR(__flush_anon_page, __flush_anon_page);
        INIT_MOD_DEP_VAR(vm_normal_page, vm_normal_page);
        INIT_MOD_DEP_VAR(access_process_vm, access_process_vm);
@@ -289,10 +308,9 @@ int init_module_dependencies(void)
 #else /*2.6.16 */
        INIT_MOD_DEP_VAR(put_task_struct, __put_task_struct_cb);
 #endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
        INIT_MOD_DEP_VAR(do_mmap_pgoff, do_mmap_pgoff);
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
 
        return 0;
 }
@@ -322,7 +340,6 @@ static inline int use_zero_page(struct vm_area_struct *vma)
        return !vma->vm_ops || !vma->vm_ops->fault;
 }
 
-
 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
@@ -362,6 +379,223 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add
                        stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+
+static inline struct page *follow_page_uprobe(struct vm_area_struct *vma,
+		unsigned long address, unsigned int foll_flags)
+{
+	unsigned int unused_page_mask;
+	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
+long __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long start, unsigned long nr_pages,
+               unsigned int gup_flags, struct page **pages,
+               struct vm_area_struct **vmas, int *nonblocking)
+{
+       long i;
+       unsigned long vm_flags;
+       unsigned int page_mask;
+
+       if (!nr_pages)
+               return 0;
+
+       VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
+       /*
+        * Require read or write permissions.
+        * If FOLL_FORCE is set, we only require the "MAY" flags.
+        */
+       vm_flags  = (gup_flags & FOLL_WRITE) ?
+                       (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       vm_flags &= (gup_flags & FOLL_FORCE) ?
+                       (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+       /*
+        * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
+        * would be called on PROT_NONE ranges. We must never invoke
+        * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
+        * page faults would unprotect the PROT_NONE ranges if
+        * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
+        * bitflag. So to avoid that, don't set FOLL_NUMA if
+        * FOLL_FORCE is set.
+        */
+       if (!(gup_flags & FOLL_FORCE))
+               gup_flags |= FOLL_NUMA;
+
+       i = 0;
+
+       do {
+               struct vm_area_struct *vma;
+
+               vma = find_extend_vma(mm, start);
+               if (!vma && dbi_in_gate_area(tsk, start)) {
+                       unsigned long pg = start & PAGE_MASK;
+                       pgd_t *pgd;
+                       pud_t *pud;
+                       pmd_t *pmd;
+                       pte_t *pte;
+
+                       /* user gate pages are read-only */
+                       if (gup_flags & FOLL_WRITE)
+                               return i ? : -EFAULT;
+                       if (pg > TASK_SIZE)
+                               pgd = pgd_offset_k(pg);
+                       else
+                               pgd = pgd_offset_gate(mm, pg);
+                       BUG_ON(pgd_none(*pgd));
+                       pud = pud_offset(pgd, pg);
+                       BUG_ON(pud_none(*pud));
+                       pmd = pmd_offset(pud, pg);
+                       if (pmd_none(*pmd))
+                               return i ? : -EFAULT;
+                       VM_BUG_ON(pmd_trans_huge(*pmd));
+                       pte = pte_offset_map(pmd, pg);
+                       if (pte_none(*pte)) {
+                               pte_unmap(pte);
+                               return i ? : -EFAULT;
+                       }
+                       vma = get_gate_vma(mm);
+                       if (pages) {
+                               struct page *page;
+
+                               page = vm_normal_page(vma, start, *pte);
+                               if (!page) {
+                                       if (!(gup_flags & FOLL_DUMP) &&
+                                            swap_is_zero_pfn(pte_pfn(*pte)))
+                                               page = pte_page(*pte);
+                                       else {
+                                               pte_unmap(pte);
+                                               return i ? : -EFAULT;
+                                       }
+                               }
+                               pages[i] = page;
+                               get_page(page);
+                       }
+                       pte_unmap(pte);
+                       page_mask = 0;
+                       goto next_page;
+               }
+
+               if (!vma ||
+                   (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+                   !(vm_flags & vma->vm_flags))
+                       return i ? : -EFAULT;
+
+               if (is_vm_hugetlb_page(vma)) {
+                       i = follow_hugetlb_page(mm, vma, pages, vmas,
+                                       &start, &nr_pages, i, gup_flags);
+                       continue;
+               }
+
+               do {
+                       struct page *page;
+                       unsigned int foll_flags = gup_flags;
+                       unsigned int page_increm;
+
+                       /*
+                        * If we have a pending SIGKILL, don't keep faulting
+                        * pages and potentially allocating memory.
+                        */
+                       if (unlikely(fatal_signal_pending(current)))
+                               return i ? i : -ERESTARTSYS;
+
+                       /* cond_resched(); */
+                       while (!(page = follow_page_mask(vma, start,
+                                               foll_flags, &page_mask))) {
+                               int ret;
+                               unsigned int fault_flags = 0;
+
+                               /* For mlock, just skip the stack guard page. */
+                               if (foll_flags & FOLL_MLOCK) {
+                                       if (stack_guard_page(vma, start))
+                                               goto next_page;
+                               }
+                               if (foll_flags & FOLL_WRITE)
+                                       fault_flags |= FAULT_FLAG_WRITE;
+                               if (nonblocking)
+                                       fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+                               if (foll_flags & FOLL_NOWAIT)
+                                       fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
+
+                               ret = handle_mm_fault(mm, vma, start,
+                                                       fault_flags);
+
+                               if (ret & VM_FAULT_ERROR) {
+                                       if (ret & VM_FAULT_OOM)
+                                               return i ? i : -ENOMEM;
+                                       if (ret & (VM_FAULT_HWPOISON |
+                                                  VM_FAULT_HWPOISON_LARGE)) {
+                                               if (i)
+                                                       return i;
+                                               else if (gup_flags & FOLL_HWPOISON)
+                                                       return -EHWPOISON;
+                                               else
+                                                       return -EFAULT;
+                                       }
+                                       if (ret & VM_FAULT_SIGBUS)
+                                               return i ? i : -EFAULT;
+                                       BUG();
+                               }
+
+                               if (tsk) {
+                                       if (ret & VM_FAULT_MAJOR)
+                                               tsk->maj_flt++;
+                                       else
+                                               tsk->min_flt++;
+                               }
+
+                               if (ret & VM_FAULT_RETRY) {
+                                       if (nonblocking)
+                                               *nonblocking = 0;
+                                       return i;
+                               }
+
+                               /*
+                                * The VM_FAULT_WRITE bit tells us that
+                                * do_wp_page has broken COW when necessary,
+                                * even if maybe_mkwrite decided not to set
+                                * pte_write. We can thus safely do subsequent
+                                * page lookups as if they were reads. But only
+                                * do so when looping for pte_write is futile:
+                                * in some cases userspace may also be wanting
+                                * to write to the gotten user page, which a
+                                * read fault here might prevent (a readonly
+                                * page might get reCOWed by userspace write).
+                                */
+                               if ((ret & VM_FAULT_WRITE) &&
+                                   !(vma->vm_flags & VM_WRITE))
+                                       foll_flags &= ~FOLL_WRITE;
+
+                               /* cond_resched(); */
+                       }
+                       if (IS_ERR(page))
+                               return i ? i : PTR_ERR(page);
+                       if (pages) {
+                               pages[i] = page;
+
+                               flush_anon_page(vma, page, start);
+                               flush_dcache_page(page);
+                               page_mask = 0;
+                       }
+next_page:
+                       if (vmas) {
+                               vmas[i] = vma;
+                               page_mask = 0;
+                       }
+                       page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+                       if (page_increm > nr_pages)
+                               page_increm = nr_pages;
+                       i += page_increm;
+                       start += page_increm * PAGE_SIZE;
+                       nr_pages -= page_increm;
+               } while (nr_pages && start < vma->vm_end);
+       } while (nr_pages);
+       return i;
+}
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
 static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                        unsigned long start, int nr_pages, unsigned int gup_flags,
                        struct page **pages, struct vm_area_struct **vmas,
@@ -556,6 +790,9 @@ next_page:
 
        return i;
 }
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
 #else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
 
 static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
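
The 3.8+ __get_user_pages_uprobe() added above appears to be a port of the
upstream __get_user_pages() with the cond_resched() calls commented out,
presumably so it can run with preemption disabled. A hedged usage sketch;
read_task_page is illustrative, not the module's API, it assumes the copied
range stays within one page, and mm locking is left to the caller:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>

/* Illustrative only: pin one page of a traced task and copy len bytes out. */
static int read_task_page(struct task_struct *task, unsigned long uaddr,
			  void *buf, size_t len)
{
	struct page *page;
	void *kaddr;
	long pinned;

	/* FOLL_GET is required whenever a pages array is passed */
	pinned = __get_user_pages_uprobe(task, task->mm, uaddr & PAGE_MASK, 1,
					 FOLL_GET, &page, NULL, NULL);
	if (pinned <= 0)
		return -EFAULT;

	kaddr = kmap_atomic(page);
	memcpy(buf, kaddr + (uaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
	put_page(page);

	return 0;
}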
kprobe/dbi_kprobes_deps.h
index c7a7161..bd80e8d 100644
 #include <linux/pagemap.h>
 #include "../ksyms/ksyms.h"
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, head, member)
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, pos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, pos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, pos, head, member)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
 
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12))
 #define synchronize_sched      synchronize_kernel
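
These wrappers let every call site keep the pre-3.9 four-argument form: the
extra node cursor is consumed by the macro on < 3.9 and simply dropped at
expansion time on >= 3.9. A usage sketch mirroring the converted call sites
(lookup_example and probe_table_head are illustrative names):

#include <linux/list.h>

static struct kprobe *lookup_example(struct hlist_head *probe_table_head,
				     void *addr)
{
	struct kprobe *p;
	struct hlist_node *node;	/* only dereferenced on < 3.9 */

	swap_hlist_for_each_entry_rcu(p, node, probe_table_head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}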
uprobe/swap_uprobes.c
index 0ef88ce..c4ab6f7 100644
@@ -61,7 +61,7 @@ void print_kprobe_hash_table(void)
        // print kprobe table
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+               swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find K tgid=%u, addr=%x\n",
                                        p->tgid, p->addr);
                }
@@ -78,7 +78,7 @@ void print_kretprobe_hash_table(void)
        // print kretprobe table
        for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
                head = &kretprobe_inst_table[i];
-               hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+               swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find KR tgid=%u, addr=%x\n",
                                        p->tgid, p->addr);
                }
@@ -95,7 +95,7 @@ void print_uprobe_hash_table(void)
        // print uprobe table
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_insn_slot_table[i];
-               hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+               swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                        printk("####### find U tgid=%u, addr=%x\n",
                                        p->tgid, p->addr);
                }
@@ -290,7 +290,7 @@ struct kprobe *get_ukprobe(void *addr, pid_t tgid)
        struct kprobe *p;
 
        head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, node, head, hlist) {
+       swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
@@ -321,7 +321,7 @@ static struct kprobe *get_ukprobe_bis_arm(void *addr, pid_t tgid)
 
        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
+       swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
                if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
@@ -338,7 +338,7 @@ static struct kprobe *get_ukprobe_bis_thumb(void *addr, pid_t tgid)
 
        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, node, head, is_hlist_thumb) {
+       swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_thumb) {
                if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
@@ -362,7 +362,7 @@ struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *
 
        /* TODO: test - two processes invoke an instrumented function */
        head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, node, head, is_hlist) {
+       swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
                if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
                        return p;
                }
@@ -428,7 +428,7 @@ static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
        struct hlist_node *node;
        struct uretprobe_instance *ri;
 
-       hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
+       swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
                return ri;
        }
 
@@ -441,7 +441,7 @@ struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
        struct hlist_node *node;
        struct uretprobe_instance *ri;
 
-       hlist_for_each_entry (ri, node, &rp->free_instances, uflist) {
+       swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }
 
@@ -492,12 +492,12 @@ static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
        struct hlist_node *node;
        struct uretprobe_instance *ri;
 
-       hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+       swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                return ri;
        }
 
        if (!alloc_nodes_uretprobe(rp)) {
-               hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+               swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
                        return ri;
                }
        }
@@ -697,7 +697,7 @@ int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
         *       real return address, and all the rest will point to
         *       uretprobe_trampoline
         */
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current) {
                        /* another task is sharing our hash bucket */
                        continue;
@@ -878,7 +878,7 @@ int dbi_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct
        struct hlist_node *node, *tmp;
        struct hlist_head *head = uretprobe_inst_table_head(parent->mm);
 
-       hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+       swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (parent == ri->task) {
                        dbi_disarm_urp_inst(ri, task);
                }
@@ -940,7 +940,7 @@ void dbi_unregister_all_uprobes(struct task_struct *task, int atomic)
 
        for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
                head = &uprobe_table[i];
-               hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
+               swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
                        if (kp2up(p)->task->tgid == task->tgid) {
                                struct uprobe *up = container_of(p, struct uprobe, kp);
                                printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",