Fix kernel dump issue on Tegra: add a local access_process_vm_atomic() (based on get_user_pages_uprobe) instead of relying on the native kernel's access_process_vm() internals.
authorEkaterina Gorelkina <ekaterina@ekaterina-desktop.(none)>
Wed, 15 Sep 2010 14:08:04 +0000 (18:08 +0400)
committerEkaterina Gorelkina <ekaterina@ekaterina-desktop.(none)>
Wed, 15 Sep 2010 14:08:04 +0000 (18:08 +0400)
kprobe/dbi_kprobes_deps.c
kprobe/dbi_kprobes_deps.h

index 7cfcf65..0f3d720 100644 (file)
@@ -392,6 +392,69 @@ int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
 #endif
 }
 
+/*
+ * access_process_vm_atomic() - copy data between a kernel buffer and the
+ * address space of another task.
+ *
+ * Local replacement for the native kernel's access_process_vm(): the body
+ * is the same algorithm, but user pages are resolved through
+ * get_user_pages_uprobe() instead of the stock get_user_pages()
+ * (the visible difference from the upstream implementation).
+ *
+ * @tsk:   target task whose memory is accessed
+ * @addr:  start address in @tsk's address space
+ * @buf:   kernel buffer to copy into (read) or out of (write)
+ * @len:   number of bytes to transfer
+ * @write: non-zero = write @buf into the target, zero = read into @buf
+ *
+ * Returns the number of bytes actually transferred; may be short of @len
+ * if a page cannot be resolved, or 0 if the task has no mm.
+ *
+ * NOTE(review): despite the "_atomic" suffix, this function takes
+ * mm->mmap_sem with down_read() and uses kmap(), both of which may
+ * sleep — confirm callers never invoke it from atomic context.
+ */
+int access_process_vm_atomic(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+       struct mm_struct *mm;
+       struct vm_area_struct *vma;
+       void *old_buf = buf;
+
+       /* Pin the task's mm; kernel threads / exiting tasks have none. */
+       mm = get_task_mm(tsk);
+       if (!mm)
+               return 0;
+
+       down_read(&mm->mmap_sem);
+       /* ignore errors, just check how much was successfully transferred */
+       while (len) {
+               int bytes, ret, offset;
+               void *maddr;
+               struct page *page = NULL;
+
+               /* Resolve (and pin) the page backing 'addr' in the target mm. */
+               ret = get_user_pages_uprobe(tsk, mm, addr, 1,
+                               write, 1, &page, &vma);
+               if (ret <= 0) {
+                       /*
+                        * Check if this is a VM_IO | VM_PFNMAP VMA, which
+                        * we can access using slightly different code.
+                        */
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+                       vma = find_vma(mm, addr);
+                       if (!vma)
+                               break;
+                       /* Drivers may provide an ->access hook for IO mappings. */
+                       if (vma->vm_ops && vma->vm_ops->access)
+                               ret = vma->vm_ops->access(vma, addr, buf,
+                                                         len, write);
+                       if (ret <= 0)
+#endif
+                               break;
+                       bytes = ret;
+               } else {
+                       /* Clamp the chunk so it never crosses a page boundary. */
+                       bytes = len;
+                       offset = addr & (PAGE_SIZE-1);
+                       if (bytes > PAGE_SIZE-offset)
+                               bytes = PAGE_SIZE-offset;
+
+                       /* Map the pinned page and copy through the kernel alias;
+                        * copy_{to,from}_user_page() also handle cache flushing
+                        * required on architectures with virtually-indexed caches. */
+                       maddr = kmap(page);
+                       if (write) {
+                               copy_to_user_page(vma, page, addr,
+                                                 maddr + offset, buf, bytes);
+                               set_page_dirty_lock(page);
+                       } else {
+                               copy_from_user_page(vma, page, addr,
+                                                   buf, maddr + offset, bytes);
+                       }
+                       kunmap(page);
+                       /* Drop the reference taken by get_user_pages_uprobe(). */
+                       page_cache_release(page);
+               }
+               len -= bytes;
+               buf += bytes;
+               addr += bytes;
+       }
+       up_read(&mm->mmap_sem);
+       mmput(mm);
+
+       /* Bytes copied = how far 'buf' advanced. */
+       return buf - old_buf;
+}
+
 int page_present (struct mm_struct *mm, unsigned long address)
 {
        pgd_t *pgd;
@@ -432,4 +495,5 @@ out:
 
 EXPORT_SYMBOL_GPL (page_present);
 EXPORT_SYMBOL_GPL (get_user_pages_uprobe);
+EXPORT_SYMBOL_GPL (access_process_vm_atomic);
 
index efea5db..38560d6 100644 (file)
@@ -84,8 +84,8 @@ int get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int len, int write, int force,
                struct page **pages, struct vm_area_struct **vmas);
 
-#define read_proc_vm_atomic(tsk, addr, buf, len)       access_process_vm (tsk, addr, buf, len, 0)
-#define write_proc_vm_atomic(tsk, addr, buf, len)      access_process_vm (tsk, addr, buf, len, 1)
+#define read_proc_vm_atomic(tsk, addr, buf, len)       access_process_vm_atomic (tsk, addr, buf, len, 0)
+#define write_proc_vm_atomic(tsk, addr, buf, len)      access_process_vm_atomic (tsk, addr, buf, len, 1)
 int page_present (struct mm_struct *mm, unsigned long addr);
 
 #define get_user_pages_atomic  get_user_pages_uprobe