mm/mmap: regression fix for unmapped_area{_topdown}
author     Liam R. Howlett <Liam.Howlett@oracle.com>
Fri, 14 Apr 2023 18:59:19 +0000 (14:59 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 26 Apr 2023 12:28:41 +0000 (14:28 +0200)
commit 58c5d0d6d522112577c7eeb71d382ea642ed7be4 upstream.

The maple tree limits the gap returned to a window that specifically fits
what was asked.  This may not be optimal when switching search directions,
or when the gap found does not actually satisfy the requested space for
other reasons, such as the stack guard gap of a neighbouring VMA.  Fix the
search by checking the returned gap against the adjacent VMAs and, on the
rare occasion that a conflict occurs, retrying with a narrowed search
window.
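
Not part of the kernel change itself, but as a minimal userspace sketch of
the same narrow-the-window-and-retry pattern: a candidate gap is checked
against the neighbouring reserved ranges, and on a conflict the lower limit
is raised past the conflicting range before searching again.  The struct
range list, the find_gap() helper and the 'guard' parameter are illustrative
stand-ins (the guard models the stack guard gap the patch checks with
vm_start_gap()/vm_end_gap()); none of these names exist in mm/mmap.c.

#include <stdio.h>

struct range {
        unsigned long start, end;       /* reserved as [start, end) */
};

/*
 * Toy bottom-up search over a sorted list of reserved ranges: pick the
 * lowest gap of 'length' bytes at or above 'low_limit' while keeping
 * 'guard' free bytes below the next reservation.  On a conflict, raise
 * low_limit past the conflicting range and retry with the narrowed window.
 */
static unsigned long find_gap(const struct range *res, int n,
                              unsigned long low_limit,
                              unsigned long high_limit,
                              unsigned long length, unsigned long guard)
{
        unsigned long gap;
        int i;

retry:
        gap = low_limit;
        for (i = 0; i < n; i++) {
                if (res[i].end <= gap)
                        continue;       /* reservation lies below the gap */
                if (res[i].start >= gap + length + guard)
                        break;          /* gap plus guard fits below it */
                /* Conflict: narrow the search window and try again. */
                low_limit = res[i].end;
                goto retry;
        }
        if (gap + length > high_limit)
                return (unsigned long)-1;       /* the kernel returns -ENOMEM */
        return gap;
}

int main(void)
{
        const struct range res[] = { { 0x1000, 0x3000 }, { 0x4050, 0x6000 } };

        /*
         * Expect 0x6000: the gap at 0x3000 would hold 0x1000 bytes, but
         * not with the 0x100-byte guard below the next reservation.
         */
        printf("gap at %#lx\n", find_gap(res, 2, 0x1000, 0x10000, 0x1000, 0x100));
        return 0;
}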

Link: https://lkml.kernel.org/r/20230414185919.4175572-1-Liam.Howlett@oracle.com
Fixes: 3499a13168da ("mm/mmap: use maple tree for unmapped_area{_topdown}")
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reported-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/mmap.c

index fe1db60..14ca259 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1565,7 +1565,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
  */
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
-       unsigned long length, gap;
+       unsigned long length, gap, low_limit;
+       struct vm_area_struct *tmp;
 
        MA_STATE(mas, &current->mm->mm_mt, 0, 0);
 
@@ -1574,12 +1575,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
        if (length < info->length)
                return -ENOMEM;
 
-       if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
-                                 length))
+       low_limit = info->low_limit;
+retry:
+       if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
                return -ENOMEM;
 
        gap = mas.index;
        gap += (info->align_offset - gap) & info->align_mask;
+       tmp = mas_next(&mas, ULONG_MAX);
+       if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+               if (vm_start_gap(tmp) < gap + length - 1) {
+                       low_limit = tmp->vm_end;
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       } else {
+               tmp = mas_prev(&mas, 0);
+               if (tmp && vm_end_gap(tmp) > gap) {
+                       low_limit = vm_end_gap(tmp);
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       }
+
        return gap;
 }
 
@@ -1595,7 +1613,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
  */
 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
-       unsigned long length, gap;
+       unsigned long length, gap, high_limit, gap_end;
+       struct vm_area_struct *tmp;
 
        MA_STATE(mas, &current->mm->mm_mt, 0, 0);
        /* Adjust search length to account for worst case alignment overhead */
@@ -1603,12 +1622,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
        if (length < info->length)
                return -ENOMEM;
 
-       if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
+       high_limit = info->high_limit;
+retry:
+       if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
                                length))
                return -ENOMEM;
 
        gap = mas.last + 1 - info->length;
        gap -= (gap - info->align_offset) & info->align_mask;
+       gap_end = mas.last;
+       tmp = mas_next(&mas, ULONG_MAX);
+       if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
+               if (vm_start_gap(tmp) <= gap_end) {
+                       high_limit = vm_start_gap(tmp);
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       } else {
+               tmp = mas_prev(&mas, 0);
+               if (tmp && vm_end_gap(tmp) > gap) {
+                       high_limit = tmp->vm_start;
+                       mas_reset(&mas);
+                       goto retry;
+               }
+       }
+
        return gap;
 }