mm: nommu: find vma using the sorted vma list
diff --git a/mm/nommu.c b/mm/nommu.c
index c4c542c..e5318f8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -680,9 +680,9 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
  */
 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       struct vm_area_struct *pvma, **pp, *next;
+       struct vm_area_struct *pvma, *prev;
        struct address_space *mapping;
-       struct rb_node **p, *parent;
+       struct rb_node **p, *parent, *rb_prev;
 
        kenter(",%p", vma);
 
@@ -703,7 +703,7 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        }
 
        /* add the VMA to the tree */
-       parent = NULL;
+       parent = rb_prev = NULL;
        p = &mm->mm_rb.rb_node;
        while (*p) {
                parent = *p;
@@ -713,17 +713,20 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
                 * (the latter is necessary as we may get identical VMAs) */
                if (vma->vm_start < pvma->vm_start)
                        p = &(*p)->rb_left;
-               else if (vma->vm_start > pvma->vm_start)
+               else if (vma->vm_start > pvma->vm_start) {
+                       rb_prev = parent;
                        p = &(*p)->rb_right;
-               else if (vma->vm_end < pvma->vm_end)
+               } else if (vma->vm_end < pvma->vm_end)
                        p = &(*p)->rb_left;
-               else if (vma->vm_end > pvma->vm_end)
+               else if (vma->vm_end > pvma->vm_end) {
+                       rb_prev = parent;
                        p = &(*p)->rb_right;
-               else if (vma < pvma)
+               } else if (vma < pvma)
                        p = &(*p)->rb_left;
-               else if (vma > pvma)
+               else if (vma > pvma) {
+                       rb_prev = parent;
                        p = &(*p)->rb_right;
-               else
+               } else
                        BUG();
        }
 
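
The descent above records in rb_prev the last node at which it turned right; once the new VMA is linked, that node is its in-order predecessor in the tree, i.e. the VMA that must precede it on the sorted mm->mmap list. A standalone sketch of the pattern (hypothetical comparator and helper name, not code from this patch):

        #include <linux/rbtree.h>
        #include <linux/types.h>

        /*
         * Sketch only: link @new into @root, remembering the last node passed
         * on a right turn.  After rb_insert_color() that node is @new's
         * in-order predecessor; NULL means @new became the leftmost node.
         * key_less() is a hypothetical comparator.
         */
        static struct rb_node *insert_and_find_prev(struct rb_root *root,
                                                    struct rb_node *new,
                                                    bool (*key_less)(struct rb_node *a,
                                                                     struct rb_node *b))
        {
                struct rb_node **link = &root->rb_node, *parent = NULL, *prev = NULL;

                while (*link) {
                        parent = *link;
                        if (key_less(new, parent)) {
                                link = &parent->rb_left;
                        } else {
                                prev = parent;  /* went right: parent precedes new */
                                link = &parent->rb_right;
                        }
                }
                rb_link_node(new, parent, link);
                rb_insert_color(new, root);
                return prev;
        }
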
@@ -731,20 +734,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 
        /* add VMA to the VMA list also */
-       for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
-               if (pvma->vm_start > vma->vm_start)
-                       break;
-               if (pvma->vm_start < vma->vm_start)
-                       continue;
-               if (pvma->vm_end < vma->vm_end)
-                       break;
-       }
+       prev = NULL;
+       if (rb_prev)
+               prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 
-       next = *pp;
-       *pp = vma;
-       vma->vm_next = next;
-       if (next)
-               next->vm_prev = vma;
+       __vma_link_list(mm, vma, prev, parent);
 }
 
 /*
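
Not visible in this hunk: __vma_link_list() is the list-splicing helper shared with the MMU build (defined in mm/util.c, declared in mm/internal.h). A paraphrased sketch of what it is expected to do with the (prev, parent) pair computed above, not a quote from this tree:

        #include <linux/mm.h>
        #include <linux/rbtree.h>

        /*
         * Sketch: splice @vma into mm->mmap after @prev.  When @prev is NULL,
         * every step of the rb-tree descent went left, so the insertion
         * parent (@rb_parent) is the in-order successor of the new node;
         * that is why add_vma_to_mm() can simply pass `parent` through.
         */
        void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                             struct vm_area_struct *prev, struct rb_node *rb_parent)
        {
                struct vm_area_struct *next;

                vma->vm_prev = prev;
                if (prev) {
                        next = prev->vm_next;
                        prev->vm_next = vma;
                } else {
                        mm->mmap = vma;
                        if (rb_parent)
                                next = rb_entry(rb_parent,
                                                struct vm_area_struct, vm_rb);
                        else
                                next = NULL;
                }
                vma->vm_next = next;
                if (next)
                        next->vm_prev = vma;
        }
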
@@ -752,7 +746,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
  */
 static void delete_vma_from_mm(struct vm_area_struct *vma)
 {
-       struct vm_area_struct **pp;
        struct address_space *mapping;
        struct mm_struct *mm = vma->vm_mm;
 
@@ -775,12 +768,14 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
        /* remove from the MM's tree and list */
        rb_erase(&vma->vm_rb, &mm->mm_rb);
-       for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
-               if (*pp == vma) {
-                       *pp = vma->vm_next;
-                       break;
-               }
-       }
+
+       if (vma->vm_prev)
+               vma->vm_prev->vm_next = vma->vm_next;
+       else
+               mm->mmap = vma->vm_next;
+
+       if (vma->vm_next)
+               vma->vm_next->vm_prev = vma->vm_prev;
 
        vma->vm_mm = NULL;
 }
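
The unlink above is O(1) because every VMA now carries a vm_prev/vm_next pair kept consistent by __vma_link_list(). A hypothetical debug helper (not part of this patch) spelling out the invariant the code relies on:

        #include <linux/mm.h>

        /*
         * Hypothetical check: @vma's neighbours must point back at it, and a
         * VMA with no predecessor must be the head of mm->mmap.
         */
        static void nommu_check_vma_links(struct mm_struct *mm,
                                          struct vm_area_struct *vma)
        {
                if (vma->vm_prev)
                        BUG_ON(vma->vm_prev->vm_next != vma);
                else
                        BUG_ON(mm->mmap != vma);

                if (vma->vm_next)
                        BUG_ON(vma->vm_next->vm_prev != vma);
        }
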
@@ -809,17 +804,15 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 {
        struct vm_area_struct *vma;
-       struct rb_node *n = mm->mm_rb.rb_node;
 
        /* check the cache first */
        vma = mm->mmap_cache;
        if (vma && vma->vm_start <= addr && vma->vm_end > addr)
                return vma;
 
-       /* trawl the tree (there may be multiple mappings in which addr
+       /* trawl the list (there may be multiple mappings in which addr
         * resides) */
-       for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
-               vma = rb_entry(n, struct vm_area_struct, vm_rb);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > addr)
                        return NULL;
                if (vma->vm_end > addr) {
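
On nommu, find_vma() only succeeds if @addr actually falls inside a mapping (there is no "nearest VMA above" semantic as on MMU kernels), so a NULL return can be read directly as "unmapped". A hypothetical caller, holding the mmap semaphore for reading (sketch, not part of the patch):

        #include <linux/mm.h>

        /* Hypothetical helper: report whether @addr lies inside any VMA of @mm. */
        static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
        {
                bool mapped;

                down_read(&mm->mmap_sem);
                mapped = find_vma(mm, addr) != NULL;
                up_read(&mm->mmap_sem);

                return mapped;
        }
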
@@ -859,7 +852,6 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                             unsigned long len)
 {
        struct vm_area_struct *vma;
-       struct rb_node *n = mm->mm_rb.rb_node;
        unsigned long end = addr + len;
 
        /* check the cache first */
@@ -867,10 +859,9 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
        if (vma && vma->vm_start == addr && vma->vm_end == end)
                return vma;
 
-       /* trawl the tree (there may be multiple mappings in which addr
+       /* trawl the list (there may be multiple mappings in which addr
         * resides) */
-       for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
-               vma = rb_entry(n, struct vm_area_struct, vm_rb);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start < addr)
                        continue;
                if (vma->vm_start > addr)
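
find_vma_exact() is stricter than find_vma(): both the start and the end of the candidate VMA must match the requested range, so only a mapping addressed as a whole is found. Illustrative expectations (sketch):

        /*
         * Sketch, assuming a single VMA spanning [0x1000, 0x3000) in @mm:
         *
         *   find_vma(mm, 0x1800)                -> that VMA  (0x1800 is inside it)
         *   find_vma_exact(mm, 0x1000, 0x2000)  -> that VMA  (start and length match)
         *   find_vma_exact(mm, 0x1800, 0x0800)  -> NULL      (no mapping is exactly
         *                                                      [0x1800, 0x2000))
         */
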
@@ -1235,7 +1226,7 @@ error_free:
 enomem:
        printk("Allocation of length %lu from process %d (%s) failed\n",
               len, current->pid, current->comm);
-       show_free_areas();
+       show_free_areas(0);
        return -ENOMEM;
 }
 
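
The added argument here and below follows the API change that gave show_free_areas() a filter parameter (the SHOW_MEM_* flags also taken by show_mem()); passing 0 keeps the old behaviour of dumping everything. Assumed declaration after that change:

        /* Assumed prototype (include/linux/mm.h); 0 == no SHOW_MEM_* filtering. */
        void show_free_areas(unsigned int filter);
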
@@ -1468,14 +1459,14 @@ error_getting_vma:
        printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
               " from process %d failed\n",
               len, current->pid);
-       show_free_areas();
+       show_free_areas(0);
        return -ENOMEM;
 
 error_getting_region:
        printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
               " from process %d failed\n",
               len, current->pid);
-       show_free_areas();
+       show_free_areas(0);
        return -ENOMEM;
 }
 EXPORT_SYMBOL(do_mmap_pgoff);