mm/mmap: introduce unlock_range() for code cleanup
author Liam Howlett <liam.howlett@oracle.com>
Tue, 29 Jun 2021 02:38:41 +0000 (19:38 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 Jun 2021 17:53:51 +0000 (10:53 -0700)
Both __do_munmap() and exit_mmap() unlock a range of VMAs using almost
identical code blocks.  Replace both blocks with a single static inline
helper, unlock_range().

[akpm@linux-foundation.org: tweak code layout]

Link: https://lkml.kernel.org/r/20210510211021.2797427-1-Liam.Howlett@Oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
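
As a standalone illustration of the pattern (not kernel code), here is a
minimal userspace C sketch of the same refactor: two call sites walking a
sorted, singly linked VMA list are collapsed into one helper.  "struct vma",
unlock_one() and the "pages" field are simplified stand-ins for the kernel's
vm_area_struct, munlock_vma_pages_all() and vma_pages(); unlike the real
helper, mm is passed explicitly rather than derived from start->vm_mm.

	#include <stdio.h>
	#include <limits.h>

	#define VM_LOCKED 0x1UL

	struct vma {
		unsigned long vm_start;	/* start address of the mapping */
		unsigned long vm_flags;	/* VM_LOCKED marks mlock()ed areas */
		unsigned long pages;	/* stand-in for vma_pages() */
		struct vma *vm_next;	/* singly linked, sorted by vm_start */
	};

	struct mm {
		struct vma *mmap;		/* head of the VMA list */
		unsigned long locked_vm;	/* pages accounted as mlock()ed */
	};

	/* Stand-in for munlock_vma_pages_all(). */
	static void unlock_one(struct vma *v)
	{
		printf("munlock vma at %#lx\n", v->vm_start);
	}

	/*
	 * The consolidated helper: walk the list from 'start' and unlock
	 * every VM_LOCKED area whose start lies below 'limit', mirroring
	 * the new unlock_range() in the patch below.
	 */
	static inline void unlock_range(struct mm *mm, struct vma *start,
					unsigned long limit)
	{
		struct vma *tmp = start;

		while (tmp && tmp->vm_start < limit) {
			if (tmp->vm_flags & VM_LOCKED) {
				mm->locked_vm -= tmp->pages;
				unlock_one(tmp);
			}
			tmp = tmp->vm_next;
		}
	}

	int main(void)
	{
		struct vma b = { 0x3000, VM_LOCKED, 4, NULL };
		struct vma a = { 0x1000, 0, 4, &b };
		struct mm mm = { &a, 4 };

		/* Partial-unmap path: only VMAs below 'end' are visited. */
		unlock_range(&mm, mm.mmap, 0x2000);

		/* exit_mmap() path: ULONG_MAX covers the whole list. */
		unlock_range(&mm, mm.mmap, ULONG_MAX);
		printf("locked_vm now %lu\n", mm.locked_vm);
		return 0;
	}

One limit parameter lets a single helper serve both callers: __do_munmap()
passes the end of the unmapped range, while exit_mmap() passes ULONG_MAX to
sweep the entire address space.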
diff --git a/mm/mmap.c b/mm/mmap.c
index bb128a4..d72716f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2802,6 +2802,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+       struct mm_struct *mm = start->vm_mm;
+       struct vm_area_struct *tmp = start;
+
+       while (tmp && tmp->vm_start < limit) {
+               if (tmp->vm_flags & VM_LOCKED) {
+                       mm->locked_vm -= vma_pages(tmp);
+                       munlock_vma_pages_all(tmp);
+               }
+
+               tmp = tmp->vm_next;
+       }
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2885,17 +2901,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        /*
         * unlock any mlock()ed ranges before detaching vmas
         */
-       if (mm->locked_vm) {
-               struct vm_area_struct *tmp = vma;
-               while (tmp && tmp->vm_start < end) {
-                       if (tmp->vm_flags & VM_LOCKED) {
-                               mm->locked_vm -= vma_pages(tmp);
-                               munlock_vma_pages_all(tmp);
-                       }
-
-                       tmp = tmp->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(vma, end);
 
        /* Detach vmas from rbtree */
        if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3180,14 +3187,8 @@ void exit_mmap(struct mm_struct *mm)
                mmap_write_unlock(mm);
        }
 
-       if (mm->locked_vm) {
-               vma = mm->mmap;
-               while (vma) {
-                       if (vma->vm_flags & VM_LOCKED)
-                               munlock_vma_pages_all(vma);
-                       vma = vma->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(mm->mmap, ULONG_MAX);
 
        arch_exit_mmap(mm);