Adapt new KProbe arch for Beagle and DTV.
author    Ekaterina Gorelkina <ekaterina@ekaterina-desktop.(none)>
Wed, 15 Sep 2010 13:26:33 +0000 (17:26 +0400)
committer Ekaterina Gorelkina <ekaterina@ekaterina-desktop.(none)>
Wed, 15 Sep 2010 13:26:33 +0000 (17:26 +0400)
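
Adjust the kprobe sources for the kernels used on the Beagle and DTV
targets: define the GUP_FLAGS_* constants locally, add compatibility
paths to __get_user_pages_uprobe() for 2.6.18-era kernels (old
follow_hugetlb_page() and flush_anon_page() signatures, discrete
VM_FAULT_* return codes from handle_mm_fault()), drop the local
access_process_vm_atomic() copy in favour of the kernel's
access_process_vm(), rewrite page_present() as a plain page-table
walk, and fix up the includes in the ARM and uprobes code.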
kprobe/arch/asm-arm/dbi_kprobes.c
kprobe/dbi_kprobes.c
kprobe/dbi_kprobes_deps.c
kprobe/dbi_kprobes_deps.h
kprobe/dbi_uprobes.c

diff --git a/kprobe/arch/asm-arm/dbi_kprobes.c b/kprobe/arch/asm-arm/dbi_kprobes.c
index db40933..e6efbf0 100644
@@ -26,6 +26,8 @@
 
  */
 
+#include <linux/module.h>
+
 #include "dbi_kprobes.h"
 #include "../dbi_kprobes.h"
 
diff --git a/kprobe/dbi_kprobes.c b/kprobe/dbi_kprobes.c
index 6775318..752729a 100644
 
  */
 
+#include "dbi_kprobes.h"
+#include "arch/dbi_kprobes.h"
+#include "arch/asm/dbi_kprobes.h"
+
+#include "dbi_kdebug.h"
+#include "dbi_kprobes_deps.h"
+#include "dbi_insn_slots.h"
+#include "dbi_uprobes.h"
+
 #include <linux/version.h>
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
 #include <linux/config.h>
 
 #include <linux/hash.h>
 #include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
 
 
-#include "dbi_kprobes.h"
-#include "arch/dbi_kprobes.h"
-#include "arch/asm/dbi_kprobes.h"
-
-#include "dbi_kdebug.h"
-#include "dbi_kprobes_deps.h"
-#include "dbi_insn_slots.h"
-#include "dbi_uprobes.h"
 
 extern unsigned int *sched_addr;
 extern unsigned int *fork_addr;
diff --git a/kprobe/dbi_kprobes_deps.c b/kprobe/dbi_kprobes_deps.c
index 3d97cd1..7cfcf65 100644
@@ -26,6 +26,9 @@
  */
 
 #include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
 
 #include "dbi_kprobes_deps.h"
 #include "dbi_kdebug.h"
@@ -146,9 +149,13 @@ int init_module_dependencies()
        return 0;
 }
 
+#define GUP_FLAGS_WRITE                  0x1
+#define GUP_FLAGS_FORCE                  0x2
+#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
+#define GUP_FLAGS_IGNORE_SIGKILL         0x8
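+/* The GUP_FLAGS_* values above mirror the kernel's internal definitions
+ * in mm/internal.h, which is not visible to modules. */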
 
-static inline 
-int use_zero_page(struct vm_area_struct *vma)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
+static inline int use_zero_page(struct vm_area_struct *vma)
 {
        /*
         * We don't want to optimize FOLL_ANON for make_pages_present()
@@ -234,13 +241,18 @@ int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                }
 
                if (!vma ||
                                (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                                (!ignore && !(vm_flags & vma->vm_flags)))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
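+                       /* old signature: no 'write' argument */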
+                       i = follow_hugetlb_page(mm, vma, pages, vmas,
+                                               &start, &len, i);
+#else
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
                                        &start, &len, i, write);
+#endif
                        continue;
                }
 
@@ -248,14 +260,17 @@ int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                if (pages)
                        foll_flags |= FOLL_GET;
 
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
                if (!write && use_zero_page(vma))
                        foll_flags |= FOLL_ANON;
+#endif
 #endif
 
                do {
                        struct page *page;
 
+#if 0
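+                       /* Disabled: fatal_signal_pending() is assumed
+                        * unavailable on the older target kernels. */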
                        /*
                         * If we have a pending SIGKILL, don't keep faulting
                         * pages and potentially allocating memory, unless
@@ -264,13 +279,14 @@ int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                         * we're only unlocking already resident/mapped pages.
                         */
                        if (unlikely(!ignore_sigkill &&
                                                fatal_signal_pending(current)))
                                return i ? i : -ERESTARTSYS;
+#endif
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
 
 
                        //cond_resched();
 
                        DBPRINTF ("pages = %p vma = %p\n", pages, vma);
@@ -278,18 +294,39 @@ int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                                int ret;
                                ret = handle_mm_fault(mm, vma, start,
                                                foll_flags & FOLL_WRITE);
+
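+                               /* Kernels this old return discrete VM_FAULT_*
+                                * codes from handle_mm_fault(), not a bit
+                                * mask. */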
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+                               if (ret & VM_FAULT_WRITE)
+                                       foll_flags &= ~FOLL_WRITE;
+
+                               switch (ret & ~VM_FAULT_WRITE) {
+                               case VM_FAULT_MINOR:
+                                       tsk->min_flt++;
+                                       break;
+                               case VM_FAULT_MAJOR:
+                                       tsk->maj_flt++;
+                                       break;
+                               case VM_FAULT_SIGBUS:
+                                       return i ? i : -EFAULT;
+                               case VM_FAULT_OOM:
+                                       return i ? i : -ENOMEM;
+                               default:
+                                       BUG();
+                               }
+
+#else
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
                                        else if (ret & VM_FAULT_SIGBUS)
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
                                if (ret & VM_FAULT_MAJOR)
                                        tsk->maj_flt++;
                                else
                                        tsk->min_flt++;
 
                                /*
                                 * The VM_FAULT_WRITE bit tells us that
                                 * do_wp_page has broken COW when necessary,
@@ -303,17 +340,24 @@ int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                                 * page might get reCOWed by userspace write).
                                 */
                                if ((ret & VM_FAULT_WRITE) &&
                                                !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
 
                                //cond_resched();
+#endif
                        }
+
                        if (IS_ERR(page))
                                return i ? i : PTR_ERR(page);
                        if (pages) {
                                pages[i] = page;
 
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
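+                               /* old flush_anon_page(): no vma argument */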
+                               flush_anon_page(page, start);
+#else
                                flush_anon_page(vma, page, start);
+#endif
                                flush_dcache_page(page);
                        }
                        if (vmas)
@@ -325,6 +369,8 @@ int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
        } while (len);
        return i;
 }
+#endif
+
 
 int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int len, int write, int force,
@@ -339,136 +385,51 @@ int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                flags |= GUP_FLAGS_FORCE;
 
        return __get_user_pages_uprobe(tsk, mm,
                        start, len, flags,
                        pages, vmas);
 #else
-       return get_user_pages(tsk, mm,
-                             start, len, write, force,
-                             pages, vmas);
+       return get_user_pages(tsk, mm, start, len, write, force, pages, vmas);
 #endif
 }
 
-int access_process_vm_atomic (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
-{
-
-
-       struct mm_struct *mm;
-       struct vm_area_struct *vma;
-       void *old_buf = buf;
-
-       mm = get_task_mm(tsk);
-       if (!mm)
-               return 0;
-
-       down_read(&mm->mmap_sem);
-       /* ignore errors, just check how much was successfully transferred */
-       while (len) {
-               int bytes, ret, offset;
-               void *maddr;
-               struct page *page = NULL;
-
-               ret = get_user_pages_uprobe(tsk, mm, addr, 1,
-                               write, 1, &page, &vma);
-               if (ret <= 0) {
-                       /*
-                        * Check if this is a VM_IO | VM_PFNMAP VMA, which
-                        * we can access using slightly different code.
-                        */
-#ifdef CONFIG_HAVE_IOREMAP_PROT
-                       vma = find_vma(mm, addr);
-                       if (!vma)
-                               break;
-                       if (vma->vm_ops && vma->vm_ops->access)
-                               ret = vma->vm_ops->access(vma, addr, buf,
-                                               len, write);
-                       if (ret <= 0)
-#endif
-                               break;
-                       bytes = ret;
-               } else {
-                       bytes = len;
-                       offset = addr & (PAGE_SIZE-1);
-                       if (bytes > PAGE_SIZE-offset)
-                               bytes = PAGE_SIZE-offset;
-
-                       maddr = kmap(page);
-                       if (write) {
-                               copy_to_user_page(vma, page, addr,
-                                               maddr + offset, buf, bytes);
-                               set_page_dirty_lock(page);
-                       } else {
-                               copy_from_user_page(vma, page, addr,
-                                               buf, maddr + offset, bytes);
-                       }
-                       kunmap(page);
-                       page_cache_release(page);
-               }
-               len -= bytes;
-               buf += bytes;
-               addr += bytes;
-       }
-       up_read(&mm->mmap_sem);
-       mmput(mm);
-
-       return buf - old_buf;
-
-}
-
-int page_present (struct mm_struct *mm, unsigned long addr)
+int page_present (struct mm_struct *mm, unsigned long address)
 {
        pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-       int ret = 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
-       pud_t *pud;
-#endif
-
-       //printk("page_present\n");
-       //BUG_ON(down_read_trylock(&mm->mmap_sem) == 0);
-       down_read (&mm->mmap_sem);
-       spin_lock (&(mm->page_table_lock));
-       pgd = pgd_offset (mm, addr);
-       //printk("pgd %p\n", pgd);
-       if ((pgd != NULL) && pgd_present (*pgd))
-       {
-               //printk("pgd_present\n");
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
-               pud = pud_offset (pgd, addr);
-               //printk("pud %p\n", pud);
-               if ((pud != NULL) && pud_present (*pud))
-               {
-                       pmd = pmd_offset (pud, addr);
-#else
-                       {
-                               pmd = pmd_offset (pgd, addr);
-#endif
-                               //printk("pmd %p\n", pmd);
-                               if ((pmd != NULL) && pmd_present (*pmd))
-                               {
-                                       //spinlock_t *ptl;
-                                       //printk("pmd_present\n");
-                                       pte = pte_offset_map (pmd, addr);
-                                       //pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-                                       //printk("pte %p/%lx\n", pte, addr);
-                                       if ((pte != NULL) && pte_present (*pte))
-                                       {
-                                               ret = 1;
-                                               //printk("pte_present\n");
-                                       }
-                                       pte_unmap (pte);
-                                       //pte_unmap_unlock(pte, ptl);
-                               }
-                       }
-               }
-               spin_unlock (&(mm->page_table_lock));
-               up_read (&mm->mmap_sem);
-               //printk("page_present %d\n", ret);
-               return ret;
-       }
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *ptep, pte;
+        unsigned long pfn;
+
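+        /* Lockless walk: no mmap_sem or page_table_lock is taken here;
+         * the caller is assumed to hold whatever locks keep these page
+         * tables stable. */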
+        pgd = pgd_offset(mm, address);
+        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+                goto out;
+
+        pud = pud_offset(pgd, address);
+        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+                goto out;
+
+        pmd = pmd_offset(pud, address);
+        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+                goto out;
+
+        ptep = pte_offset_map(pmd, address);
+        if (!ptep)
+                goto out;
+
+        pte = *ptep;
+        pte_unmap(ptep);
+        if (pte_present(pte)) {
+                pfn = pte_pfn(pte);
+                if (pfn_valid(pfn)) {
+                        return 1;
+                }
+        }
+
+out:
+        return 0;
+}
 
 
-EXPORT_SYMBOL_GPL (access_process_vm_atomic);
 EXPORT_SYMBOL_GPL (page_present);
 EXPORT_SYMBOL_GPL (get_user_pages_uprobe);
 
diff --git a/kprobe/dbi_kprobes_deps.h b/kprobe/dbi_kprobes_deps.h
index 246fcc4..efea5db 100644
@@ -74,6 +74,9 @@
        } \
 }
 
+//struct mm_struct* init_mm_ptr;
+//struct mm_struct init_mm;
+
 int init_module_dependencies(void);
 
 int access_process_vm_atomic(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
@@ -81,8 +84,8 @@ int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int len, int write, int force,
                struct page **pages, struct vm_area_struct **vmas);
 
-#define read_proc_vm_atomic(tsk, addr, buf, len)       access_process_vm_atomic(tsk, addr, buf, len, 0)
-#define write_proc_vm_atomic(tsk, addr, buf, len)      access_process_vm_atomic(tsk, addr, buf, len, 1)
+#define read_proc_vm_atomic(tsk, addr, buf, len)       access_process_vm(tsk, addr, buf, len, 0)
+#define write_proc_vm_atomic(tsk, addr, buf, len)      access_process_vm(tsk, addr, buf, len, 1)
 int page_present (struct mm_struct *mm, unsigned long addr);
 
 #define get_user_pages_atomic  get_user_pages_uprobe
diff --git a/kprobe/dbi_uprobes.c b/kprobe/dbi_uprobes.c
index aadd709..7c1bd8e 100644
 
  */
 
-#include <linux/hash.h>
-#include <linux/mempolicy.h>
-#include <linux/module.h>
 
 #include "dbi_uprobes.h"
 #include "dbi_insn_slots.h"
 #include "dbi_kdebug.h"
 
+#include <linux/hash.h>
+#include <linux/mempolicy.h>
+#include <linux/module.h>
+
 extern atomic_t kprobe_count;
 extern struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 extern struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];