Merge tag 'nfsd-5.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux
index 0584e54..88dcc5c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -148,8 +148,6 @@ void vma_set_page_prot(struct vm_area_struct *vma)
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
-       if (vma->vm_flags & VM_DENYWRITE)
-               allow_write_access(file);
        if (vma->vm_flags & VM_SHARED)
                mapping_unmap_writable(mapping);
 
@@ -534,6 +532,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
 {
        struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
+       mmap_assert_locked(mm);
        __rb_link = &mm->mm_rb.rb_node;
        rb_prev = __rb_parent = NULL;
 
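For reference (not part of this patch), the mmap_assert_locked() added above makes the locking rule for the rbtree walk explicit; per include/linux/mmap_lock.h of this kernel generation it expands to roughly:

	static inline void mmap_assert_locked(struct mm_struct *mm)
	{
		/* Complain if the caller holds mmap_lock neither for read nor write. */
		lockdep_assert_held(&mm->mmap_lock);
		VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
	}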
@@ -666,8 +665,6 @@ static void __vma_link_file(struct vm_area_struct *vma)
        if (file) {
                struct address_space *mapping = file->f_mapping;
 
-               if (vma->vm_flags & VM_DENYWRITE)
-                       put_write_access(file_inode(file));
                if (vma->vm_flags & VM_SHARED)
                        mapping_allow_writable(mapping);
 
@@ -1352,9 +1349,8 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
        return hint;
 }
 
-static inline int mlock_future_check(struct mm_struct *mm,
-                                    unsigned long flags,
-                                    unsigned long len)
+int mlock_future_check(struct mm_struct *mm, unsigned long flags,
+                      unsigned long len)
 {
        unsigned long locked, lock_limit;
 
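For reference (not part of this patch), dropping "static inline" here lets other mm/ code call mlock_future_check(); the body that follows in this file is untouched by the diff and amounts to roughly the RLIMIT_MEMLOCK check sketched below:

	/* Rough sketch of the existing check, not new code from this diff. */
	if (flags & VM_LOCKED) {
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}
	return 0;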
@@ -1457,9 +1453,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                return addr;
 
        if (flags & MAP_FIXED_NOREPLACE) {
-               struct vm_area_struct *vma = find_vma(mm, addr);
-
-               if (vma && vma->vm_start < addr + len)
+               if (find_vma_intersection(mm, addr, addr + len))
                        return -EEXIST;
        }
 
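For reference (not part of this patch), find_vma_intersection() folds the old find_vma() plus overlap test into a single call; its definition in include/linux/mm.h around this version is roughly:

	/* Returns the first VMA overlapping [start_addr, end_addr), or NULL. */
	static inline
	struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
						     unsigned long start_addr,
						     unsigned long end_addr)
	{
		struct vm_area_struct *vma = find_vma(mm, start_addr);

		if (vma && end_addr <= vma->vm_start)	/* no overlap */
			vma = NULL;
		return vma;
	}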
@@ -1520,12 +1514,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;
 
-                       /*
-                        * Make sure there are no mandatory locks on the file.
-                        */
-                       if (locks_verify_locked(file))
-                               return -EAGAIN;
-
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
@@ -1611,7 +1599,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
                        goto out_fput;
                }
        } else if (flags & MAP_HUGETLB) {
-               struct user_struct *user = NULL;
+               struct ucounts *ucounts = NULL;
                struct hstate *hs;
 
                hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
@@ -1627,14 +1615,12 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
                 */
                file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
                                VM_NORESERVE,
-                               &user, HUGETLB_ANONHUGE_INODE,
+                               &ucounts, HUGETLB_ANONHUGE_INODE,
                                (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }
 
-       flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 out_fput:
        if (file)
@@ -1791,22 +1777,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        vma->vm_pgoff = pgoff;
 
        if (file) {
-               if (vm_flags & VM_DENYWRITE) {
-                       error = deny_write_access(file);
-                       if (error)
-                               goto free_vma;
-               }
                if (vm_flags & VM_SHARED) {
                        error = mapping_map_writable(file->f_mapping);
                        if (error)
-                               goto allow_write_and_free_vma;
+                               goto free_vma;
                }
 
-               /* ->mmap() can change vma->vm_file, but must guarantee that
-                * vma_link() below can deny write-access if VM_DENYWRITE is set
-                * and map writably if VM_SHARED is set. This usually means the
-                * new file must not have been exposed to user-space, yet.
-                */
                vma->vm_file = get_file(file);
                error = call_mmap(file, vma);
                if (error)
@@ -1863,13 +1839,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
        /* Once vma denies write, undo our temporary denial count */
-       if (file) {
 unmap_writable:
-               if (vm_flags & VM_SHARED)
-                       mapping_unmap_writable(file->f_mapping);
-               if (vm_flags & VM_DENYWRITE)
-                       allow_write_access(file);
-       }
+       if (file && vm_flags & VM_SHARED)
+               mapping_unmap_writable(file->f_mapping);
        file = vma->vm_file;
 out:
        perf_event_mmap(vma);
@@ -1909,9 +1881,6 @@ unmap_and_free_vma:
        charged = 0;
        if (vm_flags & VM_SHARED)
                mapping_unmap_writable(file->f_mapping);
-allow_write_and_free_vma:
-       if (vm_flags & VM_DENYWRITE)
-               allow_write_access(file);
 free_vma:
        vm_area_free(vma);
 unacct_error:
@@ -2306,6 +2275,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
        struct rb_node *rb_node;
        struct vm_area_struct *vma;
 
+       mmap_assert_locked(mm);
        /* Check the cache first. */
        vma = vmacache_find(mm, addr);
        if (likely(vma))
@@ -2802,6 +2772,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+       struct mm_struct *mm = start->vm_mm;
+       struct vm_area_struct *tmp = start;
+
+       while (tmp && tmp->vm_start < limit) {
+               if (tmp->vm_flags & VM_LOCKED) {
+                       mm->locked_vm -= vma_pages(tmp);
+                       munlock_vma_pages_all(tmp);
+               }
+
+               tmp = tmp->vm_next;
+       }
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
@@ -2828,16 +2814,11 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
         */
        arch_unmap(mm, start, end);
 
-       /* Find the first overlapping VMA */
-       vma = find_vma(mm, start);
+       /* Find the first overlapping VMA where start < vma->vm_end */
+       vma = find_vma_intersection(mm, start, end);
        if (!vma)
                return 0;
        prev = vma->vm_prev;
-       /* we have  start < vma->vm_end  */
-
-       /* if it doesn't overlap, we have nothing.. */
-       if (vma->vm_start >= end)
-               return 0;
 
        /*
         * If we need to split any vma, do it now to save pain later.
@@ -2890,17 +2871,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        /*
         * unlock any mlock()ed ranges before detaching vmas
         */
-       if (mm->locked_vm) {
-               struct vm_area_struct *tmp = vma;
-               while (tmp && tmp->vm_start < end) {
-                       if (tmp->vm_flags & VM_LOCKED) {
-                               mm->locked_vm -= vma_pages(tmp);
-                               munlock_vma_pages_all(tmp);
-                       }
-
-                       tmp = tmp->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(vma, end);
 
        /* Detach vmas from rbtree */
        if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -2993,14 +2965,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
        if (mmap_write_lock_killable(mm))
                return -EINTR;
 
-       vma = find_vma(mm, start);
+       vma = vma_lookup(mm, start);
 
        if (!vma || !(vma->vm_flags & VM_SHARED))
                goto out;
 
-       if (start < vma->vm_start)
-               goto out;
-
        if (start + size > vma->vm_end) {
                struct vm_area_struct *next;
 
@@ -3185,14 +3154,8 @@ void exit_mmap(struct mm_struct *mm)
                mmap_write_unlock(mm);
        }
 
-       if (mm->locked_vm) {
-               vma = mm->mmap;
-               while (vma) {
-                       if (vma->vm_flags & VM_LOCKED)
-                               munlock_vma_pages_all(vma);
-                       vma = vma->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(mm->mmap, ULONG_MAX);
 
        arch_exit_mmap(mm);