vm_is_stack: use for_each_thread() rather than buggy while_each_thread()
index 834b2d7..20ff0c3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -86,6 +86,7 @@ EXPORT_SYMBOL(vm_get_page_prot);
 
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio __read_mostly = 50;        /* default is 50% */
+unsigned long sysctl_overcommit_kbytes __read_mostly;
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
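
The new vm.overcommit_kbytes sysctl is an absolute counterpart to vm.overcommit_ratio: rather than sizing the commit limit as a percentage of RAM, an administrator can pin it to a fixed number of kilobytes. A minimal standalone sketch of the arithmetic, assuming a vm_commit_limit()-style helper as introduced elsewhere in this series; the stub values below are made up, and the real helper also subtracts hugetlb pages from totalram_pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                                /* assuming 4 KiB pages */

    static unsigned long sysctl_overcommit_kbytes = 0;   /* vm.overcommit_kbytes */
    static unsigned long sysctl_overcommit_ratio = 50;   /* vm.overcommit_ratio  */
    static unsigned long totalram_pages = 1UL << 20;     /* stub: 4 GiB of RAM   */
    static unsigned long total_swap_pages = 1UL << 18;   /* stub: 1 GiB of swap  */

    /* Sketch only: a non-zero overcommit_kbytes overrides the ratio;
     * either way, total swap is added on top. Result is in pages. */
    static unsigned long vm_commit_limit(void)
    {
            unsigned long allowed;

            if (sysctl_overcommit_kbytes)
                    allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
            else
                    allowed = totalram_pages * sysctl_overcommit_ratio / 100;

            return allowed + total_swap_pages;
    }

    int main(void)
    {
            printf("commit limit: %lu pages\n", vm_commit_limit());
            return 0;
    }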
@@ -893,7 +894,15 @@ again:                     remove_next = 1 + (end > next->vm_end);
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
 {
-       if (vma->vm_flags ^ vm_flags)
+       /*
+        * VM_SOFTDIRTY should not prevent VMA merging: if the flags
+        * match except for the dirty bit, the caller should mark the
+        * merged VMA as dirty. If the dirty bit were not excluded
+        * from the comparison, we would increase pressure on the
+        * memory system by forcing the kernel to generate new VMAs
+        * when old ones could be extended instead.
+        */
+       if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
                return 0;
        if (vma->vm_file != file)
                return 0;
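
The idiom above deserves a note: XOR yields exactly the bits on which two flag words differ, and masking with ~VM_SOFTDIRTY discards a difference in the soft-dirty bit alone, so only real flag mismatches block a merge. A standalone illustration (the flag values are made up for the example, not the kernel's):

    #include <assert.h>

    #define VM_READ      0x1UL
    #define VM_WRITE     0x2UL
    #define VM_SOFTDIRTY 0x8UL                      /* illustrative value only */

    int main(void)
    {
            unsigned long a = VM_READ | VM_WRITE | VM_SOFTDIRTY;
            unsigned long b = VM_READ | VM_WRITE;

            assert((a ^ b) != 0);                   /* old test: merge refused */
            assert(((a ^ b) & ~VM_SOFTDIRTY) == 0); /* new test: merge allowed */
            return 0;
    }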
@@ -1082,7 +1091,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
        return a->vm_end == b->vm_start &&
                mpol_equal(vma_policy(a), vma_policy(b)) &&
                a->vm_file == b->vm_file &&
-               !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
+               !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
                b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
 }
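
anon_vma_compatible() applies the same masking trick and additionally requires that the second mapping's file offsets continue exactly where the first one's end. A small sketch of that last condition, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assuming 4 KiB pages */

    /* b continues a's file offsets iff b's pgoff equals a's pgoff plus
     * the distance between the two start addresses, counted in pages. */
    static int pgoff_contiguous(unsigned long a_start, unsigned long a_pgoff,
                                unsigned long b_start, unsigned long b_pgoff)
    {
            return b_pgoff == a_pgoff + ((b_start - a_start) >> PAGE_SHIFT);
    }

    int main(void)
    {
            /* 0x3000 - 0x1000 spans two pages, so pgoff must advance by 2. */
            printf("%d\n", pgoff_contiguous(0x1000, 0, 0x3000, 2));  /* 1 */
            printf("%d\n", pgoff_contiguous(0x1000, 0, 0x3000, 5));  /* 0 */
            return 0;
    }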
 
@@ -1190,6 +1199,24 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
        return hint;
 }
 
+static inline int mlock_future_check(struct mm_struct *mm,
+                                    unsigned long flags,
+                                    unsigned long len)
+{
+       unsigned long locked, lock_limit;
+
+       /*  mlock MCL_FUTURE? */
+       if (flags & VM_LOCKED) {
+               locked = len >> PAGE_SHIFT;
+               locked += mm->locked_vm;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
+               lock_limit >>= PAGE_SHIFT;
+               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+                       return -EAGAIN;
+       }
+       return 0;
+}
+
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
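
Note the unit conversions inside the new helper: mm->locked_vm counts pages, while RLIMIT_MEMLOCK is expressed in bytes, so both len and the rlimit are shifted down by PAGE_SHIFT before being compared. The limit being checked against can be inspected from userspace; a small self-contained example:

    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
            struct rlimit rl;
            long psize = sysconf(_SC_PAGESIZE);

            if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
                    printf("RLIMIT_MEMLOCK = %llu bytes (%llu pages of %ld)\n",
                           (unsigned long long)rl.rlim_cur,
                           (unsigned long long)rl.rlim_cur / psize, psize);
            return 0;
    }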
@@ -1251,16 +1278,8 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                if (!can_do_mlock())
                        return -EPERM;
 
-       /* mlock MCL_FUTURE? */
-       if (vm_flags & VM_LOCKED) {
-               unsigned long locked, lock_limit;
-               locked = len >> PAGE_SHIFT;
-               locked += mm->locked_vm;
-               lock_limit = rlimit(RLIMIT_MEMLOCK);
-               lock_limit >>= PAGE_SHIFT;
-               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-                       return -EAGAIN;
-       }
+       if (mlock_future_check(mm, vm_flags, len))
+               return -EAGAIN;
 
        if (file) {
                struct inode *inode = file_inode(file);
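
Behaviour on this path is unchanged: mlock_future_check() returns -EAGAIN on failure, the same value do_mmap_pgoff() hard-coded before. From userspace this surfaces as mmap() failing with EAGAIN when a MAP_LOCKED request exceeds the limit, as in this demonstration (assumes an unprivileged process, i.e. no CAP_IPC_LOCK):

    #define _GNU_SOURCE                             /* for MAP_LOCKED */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl = { 4096, 4096 };      /* allow one 4 KiB page */
            void *p;

            setrlimit(RLIMIT_MEMLOCK, &rl);
            p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
            if (p == MAP_FAILED)
                    printf("mmap(MAP_LOCKED, 1M): %s\n", strerror(errno));
            return 0;
    }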
@@ -2591,18 +2610,9 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        if (error & ~PAGE_MASK)
                return error;
 
-       /*
-        * mlock MCL_FUTURE?
-        */
-       if (mm->def_flags & VM_LOCKED) {
-               unsigned long locked, lock_limit;
-               locked = len >> PAGE_SHIFT;
-               locked += mm->locked_vm;
-               lock_limit = rlimit(RLIMIT_MEMLOCK);
-               lock_limit >>= PAGE_SHIFT;
-               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-                       return -EAGAIN;
-       }
+       error = mlock_future_check(mm, mm->def_flags, len);
+       if (error)
+               return error;
 
        /*
         * mm->mmap_sem is required to protect against another thread
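
Here the helper's return value is propagated rather than hard-coded, again preserving the old -EAGAIN. The do_brk() path matters once a process has called mlockall(MCL_FUTURE), which sets VM_LOCKED in mm->def_flags so that later heap growth hits the same check. A rough sketch; whether the allocation below actually fails depends on the ambient RLIMIT_MEMLOCK, and glibc may serve it via mmap() rather than brk():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
                    printf("mlockall: %s\n", strerror(errno));

            /* Future brk()/mmap() growth is now checked against
             * RLIMIT_MEMLOCK, so a large allocation may fail. */
            void *p = malloc(64 << 20);
            printf("malloc(64M): %s\n", p ? "ok" : "failed");
            free(p);
            return 0;
    }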
@@ -3140,7 +3150,7 @@ static int init_user_reserve(void)
        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
        return 0;
 }
-module_init(init_user_reserve)
+subsys_initcall(init_user_reserve);
 
 /*
  * Initialise sysctl_admin_reserve_kbytes.
@@ -3161,7 +3171,7 @@ static int init_admin_reserve(void)
        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
        return 0;
 }
-module_init(init_admin_reserve)
+subsys_initcall(init_admin_reserve);
 
 /*
 * Reinitialise user and admin reserves if memory is added or removed.
@@ -3231,4 +3241,4 @@ static int __meminit init_reserve_notifier(void)
 
        return 0;
 }
-module_init(init_reserve_notifier)
+subsys_initcall(init_reserve_notifier);
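
The three module_init() -> subsys_initcall() conversions move these initialisers earlier in boot: for built-in code, module_init() is an alias for device_initcall(), level 6, whereas subsys_initcall() runs at level 4, so the reserves are established before device-level code runs. Abridged from include/linux/init.h of this era (lower level runs first):

    #define core_initcall(fn)       __define_initcall(fn, 1)
    #define postcore_initcall(fn)   __define_initcall(fn, 2)
    #define arch_initcall(fn)       __define_initcall(fn, 3)
    #define subsys_initcall(fn)     __define_initcall(fn, 4)
    #define fs_initcall(fn)         __define_initcall(fn, 5)
    #define device_initcall(fn)     __define_initcall(fn, 6)  /* module_init() when built in */
    #define late_initcall(fn)       __define_initcall(fn, 7)

The conversion also adds the trailing semicolons the old lines omitted; module_init() happened to tolerate that because its expansion supplies one, while subsys_initcall() does not.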