*/
#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
#include "dbi_kprobes_deps.h"
#include "dbi_kdebug.h"
return 0;
}
+#define GUP_FLAGS_WRITE 0x1
+#define GUP_FLAGS_FORCE 0x2
+#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
+#define GUP_FLAGS_IGNORE_SIGKILL 0x8
-static inline
-int use_zero_page(struct vm_area_struct *vma)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
+static inline int use_zero_page(struct vm_area_struct *vma)
{
/*
* We don't want to optimize FOLL_ANON for make_pages_present()
}
int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, int len, int flags,
+ unsigned long start, int len, int flags,
struct page **pages, struct vm_area_struct **vmas)
{
int i;
}
if (!vma ||
- (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
- (!ignore && !(vm_flags & vma->vm_flags)))
+ (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+ (!ignore && !(vm_flags & vma->vm_flags)))
return i ? : -EFAULT;
if (is_vm_hugetlb_page(vma)) {
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+ i = follow_hugetlb_page(mm, vma, pages, vmas,
+ &start, &len, i);
+#else
i = follow_hugetlb_page(mm, vma, pages, vmas,
- &start, &len, i, write);
+ &start, &len, i, write);
+#endif
continue;
}
if (pages)
foll_flags |= FOLL_GET;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,30)
if (!write && use_zero_page(vma))
- foll_flags |= FOLL_ANON;
+ foll_flags |= FOLL_ANON;
+#endif
#endif
do {
struct page *page;
+#if 0
/*
* If we have a pending SIGKILL, don't keep faulting
* pages and potentially allocating memory, unless
* we're only unlocking already resident/mapped pages.
*/
if (unlikely(!ignore_sigkill &&
- fatal_signal_pending(current)))
+ fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
+#endif
if (write)
foll_flags |= FOLL_WRITE;
-
+
//cond_resched();
DBPRINTF ("pages = %p vma = %p\n", pages, vma);
int ret;
ret = handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+ if (ret & VM_FAULT_WRITE)
+ foll_flags &= ~FOLL_WRITE;
+
+ switch (ret & ~VM_FAULT_WRITE) {
+ case VM_FAULT_MINOR:
+ tsk->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ tsk->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ return i ? i : -EFAULT;
+ case VM_FAULT_OOM:
+ return i ? i : -ENOMEM;
+ default:
+ BUG();
+ }
+
+#else
if (ret & VM_FAULT_ERROR) {
- if (ret & VM_FAULT_OOM)
- return i ? i : -ENOMEM;
- else if (ret & VM_FAULT_SIGBUS)
- return i ? i : -EFAULT;
- BUG();
+ if (ret & VM_FAULT_OOM)
+ return i ? i : -ENOMEM;
+ else if (ret & VM_FAULT_SIGBUS)
+ return i ? i : -EFAULT;
+ BUG();
}
if (ret & VM_FAULT_MAJOR)
- tsk->maj_flt++;
+ tsk->maj_flt++;
else
- tsk->min_flt++;
-
+ tsk->min_flt++;
+
/*
* The VM_FAULT_WRITE bit tells us that
* do_wp_page has broken COW when necessary,
* page might get reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) &&
- !(vma->vm_flags & VM_WRITE))
- foll_flags &= ~FOLL_WRITE;
-
+ !(vma->vm_flags & VM_WRITE))
+ foll_flags &= ~FOLL_WRITE;
+
//cond_resched();
+#endif
+
}
+
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
if (pages) {
pages[i] = page;
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+ flush_anon_page(page, start);
+#else
flush_anon_page(vma, page, start);
+#endif
flush_dcache_page(page);
}
if (vmas)
} while (len);
return i;
}
+#endif
+
int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int write, int force,
flags |= GUP_FLAGS_FORCE;
return __get_user_pages_uprobe(tsk, mm,
- start, len, flags,
- pages, vmas);
+ start, len, flags,
+ pages, vmas);
#else
- return get_user_pages(tsk, mm,
- start, len, write, force,
- pages, vmas);
+ return get_user_pages(tsk, mm, start, len, write, force, pages, vmas);
#endif
}
-int access_process_vm_atomic (struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
-{
-
-
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- void *old_buf = buf;
-
- mm = get_task_mm(tsk);
- if (!mm)
- return 0;
-
- down_read(&mm->mmap_sem);
- /* ignore errors, just check how much was successfully transferred */
- while (len) {
- int bytes, ret, offset;
- void *maddr;
- struct page *page = NULL;
-
- ret = get_user_pages_uprobe(tsk, mm, addr, 1,
- write, 1, &page, &vma);
- if (ret <= 0) {
- /*
- * Check if this is a VM_IO | VM_PFNMAP VMA, which
- * we can access using slightly different code.
- */
-#ifdef CONFIG_HAVE_IOREMAP_PROT
- vma = find_vma(mm, addr);
- if (!vma)
- break;
- if (vma->vm_ops && vma->vm_ops->access)
- ret = vma->vm_ops->access(vma, addr, buf,
- len, write);
- if (ret <= 0)
-#endif
- break;
- bytes = ret;
- } else {
- bytes = len;
- offset = addr & (PAGE_SIZE-1);
- if (bytes > PAGE_SIZE-offset)
- bytes = PAGE_SIZE-offset;
-
- maddr = kmap(page);
- if (write) {
- copy_to_user_page(vma, page, addr,
- maddr + offset, buf, bytes);
- set_page_dirty_lock(page);
- } else {
- copy_from_user_page(vma, page, addr,
- buf, maddr + offset, bytes);
- }
- kunmap(page);
- page_cache_release(page);
- }
- len -= bytes;
- buf += bytes;
- addr += bytes;
- }
- up_read(&mm->mmap_sem);
- mmput(mm);
-
- return buf - old_buf;
-
-}
-
-int page_present (struct mm_struct *mm, unsigned long addr)
+/*
+ * page_present - check whether a user virtual address is backed by a
+ * resident page.
+ * @mm:      target address space
+ * @address: virtual address to probe
+ *
+ * Walks the page tables top-down (pgd -> pud -> pmd -> pte) and returns 1
+ * only when the final PTE is present and its pfn is valid; returns 0 on a
+ * hole or a bad entry at any level.
+ *
+ * NOTE(review): unlike the implementation it replaces, this version takes
+ * neither mmap_sem nor page_table_lock, so the answer is only a racy
+ * snapshot -- confirm that callers either hold the necessary locks or can
+ * tolerate staleness.
+ */
+int page_present (struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte;
-	int ret = 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
-	pud_t *pud;
-#endif
-
-	//printk("page_present\n");
-	//BUG_ON(down_read_trylock(&mm->mmap_sem) == 0);
-	down_read (&mm->mmap_sem);
-	spin_lock (&(mm->page_table_lock));
-	pgd = pgd_offset (mm, addr);
-	//printk("pgd %p\n", pgd);
-	if ((pgd != NULL) && pgd_present (*pgd))
-	{
-		//printk("pgd_present\n");
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
-		pud = pud_offset (pgd, addr);
-		//printk("pud %p\n", pud);
-		if ((pud != NULL) && pud_present (*pud))
-		{
-			pmd = pmd_offset (pud, addr);
-#else
-		{
-			pmd = pmd_offset (pgd, addr);
-#endif
-			//printk("pmd %p\n", pmd);
-			if ((pmd != NULL) && pmd_present (*pmd))
-			{
-				//spinlock_t *ptl;
-				//printk("pmd_present\n");
-				pte = pte_offset_map (pmd, addr);
-				//pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-				//printk("pte %p/%lx\n", pte, addr);
-				if ((pte != NULL) && pte_present (*pte))
-				{
-					ret = 1;
-					//printk("pte_present\n");
-				}
-				pte_unmap (pte);
-				//pte_unmap_unlock(pte, ptl);
-			}
-		}
-	}
-	spin_unlock (&(mm->page_table_lock));
-	up_read (&mm->mmap_sem);
-	//printk("page_present %d\n", ret);
-	return ret;
-	}
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+	unsigned long pfn;
+
+	/* Descend the page tables; bail out on a hole or corrupt entry
+	 * at any level. */
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto out;
+
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto out;
+
+	ptep = pte_offset_map(pmd, address);
+	if (!ptep)
+		goto out;
+
+	/* Snapshot the pte, then drop the temporary kmap before testing it. */
+	pte = *ptep;
+	pte_unmap(ptep);
+	if (pte_present(pte)) {
+		/* Present pte: report success only for a valid (RAM) pfn. */
+		pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			return 1;
+		}
+	}
+
+out:
+	return 0;
+}
-EXPORT_SYMBOL_GPL (access_process_vm_atomic);
EXPORT_SYMBOL_GPL (page_present);
EXPORT_SYMBOL_GPL (get_user_pages_uprobe);