diff --git a/mm/mmap.c b/mm/mmap.c
index 88dcc5c..a0a4ead 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1684,8 +1684,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
            pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
                return 0;
 
-       /* Do we need to track softdirty? */
-       if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+       /*
+        * Do we need to track softdirty? hugetlb does not support softdirty
+        * tracking yet.
+        */
+       if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
+           !is_vm_hugetlb_page(vma))
                return 1;
 
        /* Specialty mapping? */
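The hunk above stops vma_wants_writenotify() from requesting soft-dirty write notification for hugetlb VMAs, which do not support soft-dirty tracking yet. For context, soft-dirty is normally exercised from userspace via /proc: writing "4" to /proc/PID/clear_refs clears the bits, and bit 55 of each /proc/PID/pagemap entry reports them. A minimal sketch based on that documented interface (not part of the patch; error handling trimmed, assumes CONFIG_MEM_SOFT_DIRTY=y):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static uint64_t pagemap_entry(void *addr)
{
	int fd = open("/proc/self/pagemap", O_RDONLY);
	uint64_t ent = 0;
	/* one 64-bit entry per page */
	off_t off = ((uintptr_t)addr / sysconf(_SC_PAGESIZE)) * 8;

	pread(fd, &ent, sizeof(ent), off);
	close(fd);
	return ent;
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = aligned_alloc(psz, psz);
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	p[0] = 1;                /* fault the page in */
	write(fd, "4", 1);       /* "4" clears soft-dirty bits */
	close(fd);

	printf("after clear: soft-dirty=%d\n", (int)((pagemap_entry(p) >> 55) & 1));
	p[0] = 2;                /* write again: bit should be set */
	printf("after write: soft-dirty=%d\n", (int)((pagemap_entry(p) >> 55) & 1));
	return 0;
}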
@@ -1832,7 +1836,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        if (!arch_validate_flags(vma->vm_flags)) {
                error = -EINVAL;
                if (file)
-                       goto unmap_and_free_vma;
+                       goto close_and_free_vma;
                else
                        goto free_vma;
        }
@@ -1872,13 +1876,15 @@ out:
 
        return addr;
 
+close_and_free_vma:
+       if (vma->vm_ops && vma->vm_ops->close)
+               vma->vm_ops->close(vma);
 unmap_and_free_vma:
        fput(vma->vm_file);
        vma->vm_file = NULL;
 
        /* Undo any partial mapping done by a device driver. */
        unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
-       charged = 0;
        if (vm_flags & VM_SHARED)
                mapping_unmap_writable(file->f_mapping);
 free_vma:
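The two hunks above fix the mmap_region() error path: once call_mmap() has succeeded, the driver's ->mmap() may have taken references or set up state that only its vm_ops->close() releases, so bailing out through unmap_and_free_vma alone (as happened when arch_validate_flags() rejected the flags) leaks that state. The new close_and_free_vma label runs ->close() first and then falls through to the existing teardown. A hedged sketch of the kind of driver state this protects (hypothetical foo_* names, not from the patch):

#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/mm.h>

static atomic_t foo_map_count = ATOMIC_INIT(0);	/* hypothetical driver state */

static void foo_vma_open(struct vm_area_struct *vma)
{
	atomic_inc(&foo_map_count);
}

/*
 * If mmap_region() errors out after ->mmap() succeeded and never calls
 * ->close(), this decrement is lost and the count leaks.
 */
static void foo_vma_close(struct vm_area_struct *vma)
{
	atomic_dec(&foo_map_count);
}

static const struct vm_operations_struct foo_vm_ops = {
	.open  = foo_vma_open,
	.close = foo_vma_close,
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	atomic_inc(&foo_map_count);	/* balanced by ->close() at unmap */
	vma->vm_ops = &foo_vm_ops;
	return 0;
}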
@@ -2113,14 +2119,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
        return addr;
 }
 
-#ifndef arch_get_mmap_end
-#define arch_get_mmap_end(addr)        (TASK_SIZE)
-#endif
-
-#ifndef arch_get_mmap_base
-#define arch_get_mmap_base(addr, base) (base)
-#endif
-
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -2551,7 +2549,7 @@ static int __init cmdline_parse_stack_guard_gap(char *p)
        if (!*endptr)
                stack_guard_gap = val << PAGE_SHIFT;
 
-       return 0;
+       return 1;
 }
 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
 
@@ -2643,11 +2641,28 @@ static void unmap_region(struct mm_struct *mm,
 {
        struct vm_area_struct *next = vma_next(mm, prev);
        struct mmu_gather tlb;
+       struct vm_area_struct *cur_vma;
 
        lru_add_drain();
        tlb_gather_mmu(&tlb, mm);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end);
+
+       /*
+        * Ensure we have no stale TLB entries by the time this mapping is
+        * removed from the rmap.
+        * Note that we don't have to worry about nested flushes here because
+        * we're holding the mm semaphore for removing the mapping - so any
+        * concurrent flush in this region has to be coming through the rmap,
+        * and we synchronize against that using the rmap lock.
+        */
+       for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) {
+               if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) {
+                       tlb_flush_mmu(&tlb);
+                       break;
+               }
+       }
+
        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
                                 next ? next->vm_start : USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb);
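The final hunk forces a TLB flush before free_pgtables() whenever the range being torn down contains VM_PFNMAP or VM_MIXEDMAP mappings, since the rmap cannot find those pages to flush stale entries on its own. The walk could equally be factored into a small predicate; an illustrative sketch (helper name is not from the patch), assuming the detached VMA chain is stable under the mm semaphore:

/*
 * Illustrative helper: does any VMA on this chain map pages the rmap
 * cannot reach (PFN or mixed mappings)?
 */
static bool range_has_unrmappable_pages(struct vm_area_struct *vma)
{
	struct vm_area_struct *cur;

	for (cur = vma; cur; cur = cur->vm_next)
		if (cur->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return true;
	return false;
}

With that, the added loop in unmap_region() would reduce to: if (range_has_unrmappable_pages(vma)) tlb_flush_mmu(&tlb);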