#include "dbi_kdebug.h"
+#include <linux/slab.h>
+
unsigned int *sched_addr;
unsigned int *fork_addr;
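+/*
+ * Local copies of the GUP_FLAGS_* bits from mm/internal.h (a header that
+ * is not visible to modules), as used by __get_user_pages() on kernels
+ * around 2.6.29.
+ */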
#define GUP_FLAGS_WRITE 0x1
-#define GUP_FLAGS_WRITE 0x1
#define GUP_FLAGS_FORCE 0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
#define GUP_FLAGS_IGNORE_SIGKILL 0x8
-DECLARE_MOD_CB_DEP(kallsyms_search, unsigned long, const char *name);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 29)
+struct mm_struct* init_mm_ptr;
+struct mm_struct init_mm;
+#endif
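+/*
+ * init_mm is not exported to modules on the kernels handled here, so it
+ * is resolved through kallsyms_search() at init time and mirrored into a
+ * local copy (see init_module_dependencies() below).
+ */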
+
+DECLARE_MOD_CB_DEP(kallsyms_search, unsigned long, const char *name);
DECLARE_MOD_FUNC_DEP(access_process_vm, int, struct task_struct * tsk, unsigned long addr, void *buf, int len, int write);
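+/*
+ * On 2.6.32, copy_to_user_page() may be an unexported function on some
+ * architectures (e.g. ARM), so it is routed through a dependency wrapper
+ * as well.
+ */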
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32)
+DECLARE_MOD_FUNC_DEP(copy_to_user_page, void, struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len);
+#endif
DECLARE_MOD_FUNC_DEP(find_extend_vma, struct vm_area_struct *, struct mm_struct * mm, unsigned long addr);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access);
+#endif
#else
DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags);
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30) */
DECLARE_MOD_FUNC_DEP(get_gate_vma, struct vm_area_struct *, struct task_struct *tsk);
+
+#ifdef CONFIG_HUGETLB_PAGE
+DECLARE_MOD_FUNC_DEP(follow_hugetlb_page, int, struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, int *length, int i, int write);
+#endif
+
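+/*
+ * Architectures that define __HAVE_ARCH_GATE_AREA provide the task-aware
+ * in_gate_area(); all others only offer in_gate_area_no_task().
+ */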
+#ifdef __HAVE_ARCH_GATE_AREA
+DECLARE_MOD_FUNC_DEP(in_gate_area, int, struct task_struct *tsk, unsigned long addr);
+#else
DECLARE_MOD_FUNC_DEP(in_gate_area_no_task, int, unsigned long addr);
+#endif
DECLARE_MOD_FUNC_DEP(follow_page, \
struct page *, struct vm_area_struct * vma, \
unsigned long address, unsigned int foll_flags);
DECLARE_MOD_DEP_WRAPPER (find_extend_vma, \
		struct vm_area_struct *, struct mm_struct * mm, unsigned long addr)
IMP_MOD_DEP_WRAPPER (find_extend_vma, mm, addr)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
DECLARE_MOD_DEP_WRAPPER (handle_mm_fault, \
int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
IMP_MOD_DEP_WRAPPER (handle_mm_fault, mm, vma, address, write_access)
+#endif
#else
DECLARE_MOD_DEP_WRAPPER (handle_mm_fault, \
int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags)
IMP_MOD_DEP_WRAPPER (handle_mm_fault, mm, vma, address, flags)
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30) */
DECLARE_MOD_DEP_WRAPPER (get_gate_vma, \
struct vm_area_struct *, struct task_struct *tsk)
IMP_MOD_DEP_WRAPPER (get_gate_vma, tsk)
+#ifdef CONFIG_HUGETLB_PAGE
+ DECLARE_MOD_DEP_WRAPPER (follow_hugetlb_page, int, struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, int *length, int i, int write)
+ IMP_MOD_DEP_WRAPPER (follow_hugetlb_page, mm, vma, pages, vmas, position, length, i, write)
+#endif
+
+#ifdef __HAVE_ARCH_GATE_AREA
+ DECLARE_MOD_DEP_WRAPPER (in_gate_area, int, struct task_struct *tsk, unsigned long addr)
+ IMP_MOD_DEP_WRAPPER (in_gate_area, tsk, addr)
+#else
DECLARE_MOD_DEP_WRAPPER (in_gate_area_no_task, int, unsigned long addr)
IMP_MOD_DEP_WRAPPER (in_gate_area_no_task, addr)
+#endif
+#if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 11))
DECLARE_MOD_DEP_WRAPPER (follow_page, \
struct page *, struct vm_area_struct * vma, \
unsigned long address, unsigned int foll_flags)
IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
-
+#endif
DECLARE_MOD_DEP_WRAPPER (__flush_anon_page, \
void, struct vm_area_struct *vma, \
struct page *page, unsigned long vmaddr)
IMP_MOD_DEP_WRAPPER (__flush_anon_page, vma, page, vmaddr)
DECLARE_MOD_DEP_WRAPPER (flush_ptrace_access, \
		void, struct vm_area_struct *vma, struct page *page, \
unsigned long uaddr, void *kaddr, unsigned long len, int write)
IMP_MOD_DEP_WRAPPER (flush_ptrace_access, vma, page, uaddr, kaddr, len, write)
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32)
+ DECLARE_MOD_DEP_WRAPPER(copy_to_user_page, void, struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *dst, const void *src, unsigned long len)
+IMP_MOD_DEP_WRAPPER (copy_to_user_page, vma, page, uaddr, dst, src, len)
+#endif
+
int init_module_dependencies()
{
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 29)
+	/* resolve the unexported init_mm and keep a local copy of it */
+	init_mm_ptr = (struct mm_struct*) kallsyms_search ("init_mm");
+	memcpy(&init_mm, init_mm_ptr, sizeof(struct mm_struct));
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
INIT_MOD_DEP_VAR(handle_mm_fault, handle_mm_fault);
+#endif
+
INIT_MOD_DEP_VAR(flush_ptrace_access, flush_ptrace_access);
INIT_MOD_DEP_VAR(find_extend_vma, find_extend_vma);
INIT_MOD_DEP_VAR(get_gate_vma, get_gate_vma);
+
+#ifdef CONFIG_HUGETLB_PAGE
+ INIT_MOD_DEP_VAR(follow_hugetlb_page, follow_hugetlb_page);
+#endif
+
+#ifdef __HAVE_ARCH_GATE_AREA
+ INIT_MOD_DEP_VAR(in_gate_area, in_gate_area);
+#else
INIT_MOD_DEP_VAR(in_gate_area_no_task, in_gate_area_no_task);
+#endif
INIT_MOD_DEP_VAR(follow_page, follow_page);
+
INIT_MOD_DEP_VAR(__flush_anon_page, __flush_anon_page);
INIT_MOD_DEP_VAR(vm_normal_page, vm_normal_page);
INIT_MOD_DEP_VAR(access_process_vm, access_process_vm);
INIT_MOD_DEP_VAR(put_task_struct, __put_task_struct_cb);
#endif
+
+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 32))
+ INIT_MOD_DEP_VAR(copy_to_user_page, copy_to_user_page);
+#endif
+
return 0;
}
#endif
}
+
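+/*
+ * Counterpart of the kernel's access_process_vm() that pins user pages
+ * through this module's get_user_pages_uprobe() instead of plain
+ * get_user_pages().  Returns the number of bytes actually transferred.
+ */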
+int access_process_vm_atomic(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ void *old_buf = buf;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return 0;
+
+ down_read(&mm->mmap_sem);
+ /* ignore errors, just check how much was successfully transferred */
+ while (len) {
+ int bytes, ret, offset;
+ void *maddr;
+ struct page *page = NULL;
+
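+		/* pin one page at addr via the module's get_user_pages() replacement */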
+ ret = get_user_pages_uprobe(tsk, mm, addr, 1,
+ write, 1, &page, &vma);
+
+ if (ret <= 0) {
+ /*
+ * Check if this is a VM_IO | VM_PFNMAP VMA, which
+ * we can access using slightly different code.
+ */
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ vma = find_vma(mm, addr);
+ if (!vma)
+ break;
+ if (vma->vm_ops && vma->vm_ops->access)
+ ret = vma->vm_ops->access(vma, addr, buf,
+ len, write);
+ if (ret <= 0)
+#endif
+ break;
+ bytes = ret;
+ } else {
+ bytes = len;
+ offset = addr & (PAGE_SIZE-1);
+ if (bytes > PAGE_SIZE-offset)
+ bytes = PAGE_SIZE-offset;
+
+ maddr = kmap(page);
+ if (write) {
+ copy_to_user_page(vma, page, addr,
+ maddr + offset, buf, bytes);
+ set_page_dirty_lock(page);
+ } else {
+ copy_from_user_page(vma, page, addr,
+ buf, maddr + offset, bytes);
+ }
+ kunmap(page);
+ page_cache_release(page);
+ }
+ len -= bytes;
+ buf += bytes;
+ addr += bytes;
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+
+ return buf - old_buf;
+}
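+
+/*
+ * Example usage (a sketch; "task" and "addr" are a hypothetical traced
+ * task and user address supplied by the caller):
+ *
+ *	unsigned long val;
+ *	if (access_process_vm_atomic(task, addr, &val, sizeof(val), 0) ==
+ *			sizeof(val))
+ *		printk("read %lx from %lx\n", val, addr);
+ */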
+
int page_present (struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
EXPORT_SYMBOL_GPL (page_present);
EXPORT_SYMBOL_GPL (get_user_pages_uprobe);
+EXPORT_SYMBOL_GPL (access_process_vm_atomic);