mm/mmap: add inline munmap_vma_range() for code readability
author Liam R. Howlett <Liam.Howlett@Oracle.com>
Sat, 17 Oct 2020 23:14:09 +0000 (16:14 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 18 Oct 2020 16:27:09 +0000 (09:27 -0700)
There are two locations that have a block of code for munmapping a vma
range.  Change those two locations to use a function and add meaningful
comments about what happens to the arguments, which was unclear in the
previous code.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20200818154707.2515169-2-Liam.Howlett@Oracle.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/mmap.c

index b0b8e9e..d91ecb0 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -575,6 +575,33 @@ static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
 
        return vma->vm_next;
 }
+
+/*
+ * munmap_vma_range() - munmap VMAs that overlap a range.
+ * @mm: The mm struct
+ * @start: The start of the range.
+ * @len: The length of the range.
+ * @pprev: pointer to the pointer that will be set to previous vm_area_struct
+ * @link: the rb_node to the "right" of the range
+ * @parent: the parent rb_node
+ * @uf: the userfaultfd unmap list_head
+ * Find all the vm_area_struct that overlap from @start to
+ * @start + @len and munmap them.  Set @pprev to the previous vm_area_struct.
+ *
+ * Returns: -ENOMEM on munmap failure or 0 on success.
+ */
+static inline int
+munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
+                struct vm_area_struct **pprev, struct rb_node ***link,
+                struct rb_node **parent, struct list_head *uf)
+{
+
+       while (find_vma_links(mm, start, start + len, pprev, link, parent))
+               if (do_munmap(mm, start, len, uf))
+                       return -ENOMEM;
+
+       return 0;
+}
 static unsigned long count_vma_pages_range(struct mm_struct *mm,
                unsigned long addr, unsigned long end)
 {
@@ -1721,13 +1748,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                        return -ENOMEM;
        }
 
-       /* Clear old maps */
-       while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-                             &rb_parent)) {
-               if (do_munmap(mm, addr, len, uf))
-                       return -ENOMEM;
-       }
-
+       /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
+       if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+               return -ENOMEM;
        /*
         * Private writable mapping: check memory availability
         */
@@ -3063,14 +3086,9 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
        if (error)
                return error;
 
-       /*
-        * Clear old maps.  this also does some error checking for us
-        */
-       while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-                             &rb_parent)) {
-               if (do_munmap(mm, addr, len, uf))
-                       return -ENOMEM;
-       }
+       /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
+       if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+               return -ENOMEM;
 
        /* Check against address space limits *after* clearing old maps... */
        if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))