// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code        <alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)       (0)
#endif

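/*
 * Current values and bounds for mmap address-space randomization; on
 * architectures that support it, mmap_rnd_bits is tunable via the
 * vm.mmap_rnd_bits sysctl (and mmap_rnd_compat_bits for compat tasks).
 */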
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

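/* If set, RLIMIT_DATA violations only warn instead of failing the mapping. */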
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                struct vm_area_struct *next, unsigned long start,
                unsigned long end, unsigned long tree_end, bool mm_wr_locked);

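/*
 * Recompute the page protection bits for @vm_flags while preserving any
 * arch-specific bits (e.g. cachability) carried in @oldprot.
 */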
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
        return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
        unsigned long vm_flags = vma->vm_flags;
        pgprot_t vm_page_prot;

        vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
        if (vma_wants_writenotify(vma, vm_page_prot)) {
                vm_flags &= ~VM_SHARED;
                vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
        }
        /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
        WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * Requires inode->i_mapping->i_mmap_rwsem
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
{
        if (vma->vm_flags & VM_SHARED)
                mapping_unmap_writable(mapping);

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_remove(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;

        if (file) {
                struct address_space *mapping = file->f_mapping;
                i_mmap_lock_write(mapping);
                __remove_shared_vm_struct(vma, file, mapping);
                i_mmap_unlock_write(mapping);
        }
}

/*
 * Close a vm structure and free it.
 */
static void remove_vma(struct vm_area_struct *vma, bool unreachable)
{
        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_put(vma_policy(vma));
        if (unreachable)
                __vm_area_free(vma);
        else
                vm_area_free(vma);
}

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
                                                    unsigned long min)
{
        return mas_prev(&vmi->mas, min);
}

/*
 * check_brk_limits() - Run the platform-specific range check and verify
 * mlock limits.
 * @addr: The address to check
 * @len: The size of the increase.
 *
 * Return: 0 on success.
 */
static int check_brk_limits(unsigned long addr, unsigned long len)
{
        unsigned long mapped_addr;

        mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
        if (IS_ERR_VALUE(mapped_addr))
                return mapped_addr;

        return mlock_future_ok(current->mm, current->mm->def_flags, len)
                ? 0 : -EAGAIN;
}
static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
                unsigned long addr, unsigned long request, unsigned long flags);
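/*
 * brk() - change the program break, growing or shrinking the heap.
 * Shrinking unmaps the vacated range; growing either extends the existing
 * brk vma or creates a new anonymous mapping.
 */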
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
        unsigned long newbrk, oldbrk, origbrk;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *brkvma, *next = NULL;
        unsigned long min_brk;
        bool populate = false;
        LIST_HEAD(uf);
        struct vma_iterator vmi;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        origbrk = mm->brk;

#ifdef CONFIG_COMPAT_BRK
        /*
         * CONFIG_COMPAT_BRK can still be overridden by setting
         * randomize_va_space to 2, which will still cause mm->start_brk
         * to be arbitrarily shifted
         */
        if (current->brk_randomized)
                min_brk = mm->start_brk;
        else
                min_brk = mm->end_data;
#else
        min_brk = mm->start_brk;
#endif
        if (brk < min_brk)
                goto out;

        /*
         * Check against rlimit here. If this check is done later after the test
         * of oldbrk with newbrk then it can escape the test and let the data
         * segment grow beyond its set limit in the case where the limit is
         * not page aligned -Ram Gupta
         */
        if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
                              mm->end_data, mm->start_data))
                goto out;

        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk) {
                mm->brk = brk;
                goto success;
        }

        /* Always allow shrinking brk. */
        if (brk <= mm->brk) {
                /* Search one past newbrk */
                vma_iter_init(&vmi, mm, newbrk);
                brkvma = vma_find(&vmi, oldbrk);
                if (!brkvma || brkvma->vm_start >= oldbrk)
                        goto out; /* mapping intersects with an existing non-brk vma. */
                /*
                 * mm->brk must be protected by write mmap_lock.
                 * do_vma_munmap() will drop the lock on success, so update it
                 * before calling do_vma_munmap().
                 */
                mm->brk = brk;
                if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
                        goto out;

                goto success_unlocked;
        }

        if (check_brk_limits(oldbrk, newbrk - oldbrk))
                goto out;

        /*
         * Only check if the next VMA is within the stack_guard_gap of the
         * expansion area
         */
        vma_iter_init(&vmi, mm, oldbrk);
        next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
        if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
                goto out;

        brkvma = vma_prev_limit(&vmi, mm->start_brk);
        /* Ok, looks good - let it rip. */
        if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
                goto out;

        mm->brk = brk;
        if (mm->def_flags & VM_LOCKED)
                populate = true;

success:
        mmap_write_unlock(mm);
success_unlocked:
        userfaultfd_unmap_complete(mm, &uf);
        if (populate)
                mm_populate(oldbrk, newbrk - oldbrk);
        return brk;

out:
        mm->brk = origbrk;
        mmap_write_unlock(mm);
        return origbrk;
}

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
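/*
 * validate_mm() - debug-only sanity check: walk every vma with the vma
 * iterator and verify that the maple tree ranges and mm->map_count agree.
 */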
static void validate_mm(struct mm_struct *mm)
{
        int bug = 0;
        int i = 0;
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        mt_validate(&mm->mm_mt);
        for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
                struct anon_vma *anon_vma = vma->anon_vma;
                struct anon_vma_chain *avc;
#endif
                unsigned long vmi_start, vmi_end;
                bool warn = 0;

                vmi_start = vma_iter_addr(&vmi);
                vmi_end = vma_iter_end(&vmi);
                if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
                        warn = 1;

                if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
                        warn = 1;

                if (warn) {
                        pr_emerg("issue in %s\n", current->comm);
                        dump_stack();
                        dump_vma(vma);
                        pr_emerg("tree range: %px start %lx end %lx\n", vma,
                                 vmi_start, vmi_end - 1);
                        vma_iter_dump_tree(&vmi);
                }

#ifdef CONFIG_DEBUG_VM_RB
                if (anon_vma) {
                        anon_vma_lock_read(anon_vma);
                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                                anon_vma_interval_tree_verify(avc);
                        anon_vma_unlock_read(anon_vma);
                }
#endif
                i++;
        }
        if (i != mm->map_count) {
                pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
                bug = 1;
        }
        VM_BUG_ON_MM(bug, mm);
}

#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
#define validate_mm(mm) do { } while (0)
#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_lock and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

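/* Count the pages covered by vmas intersecting the range [addr, end). */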
static unsigned long count_vma_pages_range(struct mm_struct *mm,
                unsigned long addr, unsigned long end)
{
        VMA_ITERATOR(vmi, mm, addr);
        struct vm_area_struct *vma;
        unsigned long nr_pages = 0;

        for_each_vma_range(vmi, vma, end) {
                unsigned long vm_start = max(addr, vma->vm_start);
                unsigned long vm_end = min(end, vma->vm_end);

                nr_pages += PHYS_PFN(vm_end - vm_start);
        }

        return nr_pages;
}

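/*
 * Link a file-backed vma into its address_space interval tree; the caller
 * must hold i_mmap_rwsem for writing.
 */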
static void __vma_link_file(struct vm_area_struct *vma,
                            struct address_space *mapping)
{
        if (vma->vm_flags & VM_SHARED)
                mapping_allow_writable(mapping);

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_insert(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
}

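/*
 * vma_link() - Insert @vma into the maple tree and, for file mappings, into
 * the address_space interval tree. Returns 0 on success or -ENOMEM if tree
 * preallocation fails.
 */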
static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
        VMA_ITERATOR(vmi, mm, 0);
        struct address_space *mapping = NULL;

        vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
        if (vma_iter_prealloc(&vmi, vma))
                return -ENOMEM;

        vma_start_write(vma);

        vma_iter_store(&vmi, vma);

        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
                i_mmap_lock_write(mapping);
                __vma_link_file(vma, mapping);
                i_mmap_unlock_write(mapping);
        }

        mm->map_count++;
        validate_mm(mm);
        return 0;
}

/*
 * init_multi_vma_prep() - Initializer for struct vma_prepare
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 * @next: The next vma if it is to be adjusted
 * @remove: The first vma to be removed
 * @remove2: The second vma to be removed
 */
static inline void init_multi_vma_prep(struct vma_prepare *vp,
                struct vm_area_struct *vma, struct vm_area_struct *next,
                struct vm_area_struct *remove, struct vm_area_struct *remove2)
{
        memset(vp, 0, sizeof(struct vma_prepare));
        vp->vma = vma;
        vp->anon_vma = vma->anon_vma;
        vp->remove = remove;
        vp->remove2 = remove2;
        vp->adj_next = next;
        if (!vp->anon_vma && next)
                vp->anon_vma = next->anon_vma;

        vp->file = vma->vm_file;
        if (vp->file)
                vp->mapping = vma->vm_file->f_mapping;

}

/*
 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 * @vp: The vma_prepare struct
 * @vma: The vma that will be altered once locked
 */
static inline void init_vma_prep(struct vma_prepare *vp,
                                 struct vm_area_struct *vma)
{
        init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
}


/*
 * vma_prepare() - Helper function for handling locking VMAs prior to altering
 * @vp: The initialized vma_prepare struct
 */
static inline void vma_prepare(struct vma_prepare *vp)
{
        if (vp->file) {
                uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);

                if (vp->adj_next)
                        uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
                                      vp->adj_next->vm_end);

                i_mmap_lock_write(vp->mapping);
                if (vp->insert && vp->insert->vm_file) {
                        /*
                         * Put into interval tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
                         * throughout; but we cannot insert into address
                         * space until vma start or end is updated.
                         */
                        __vma_link_file(vp->insert,
                                        vp->insert->vm_file->f_mapping);
                }
        }

        if (vp->anon_vma) {
                anon_vma_lock_write(vp->anon_vma);
                anon_vma_interval_tree_pre_update_vma(vp->vma);
                if (vp->adj_next)
                        anon_vma_interval_tree_pre_update_vma(vp->adj_next);
        }

        if (vp->file) {
                flush_dcache_mmap_lock(vp->mapping);
                vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
                if (vp->adj_next)
                        vma_interval_tree_remove(vp->adj_next,
                                                 &vp->mapping->i_mmap);
        }

}

/*
 * vma_complete() - Helper function for handling the unlocking after altering
 * VMAs, or for inserting a VMA.
 *
 * @vp: The vma_prepare struct
 * @vmi: The vma iterator
 * @mm: The mm_struct
 */
static inline void vma_complete(struct vma_prepare *vp,
                                struct vma_iterator *vmi, struct mm_struct *mm)
{
        if (vp->file) {
                if (vp->adj_next)
                        vma_interval_tree_insert(vp->adj_next,
                                                 &vp->mapping->i_mmap);
                vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
                flush_dcache_mmap_unlock(vp->mapping);
        }

        if (vp->remove && vp->file) {
                __remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
                if (vp->remove2)
                        __remove_shared_vm_struct(vp->remove2, vp->file,
                                                  vp->mapping);
        } else if (vp->insert) {
                /*
                 * split_vma has split insert from vma, and needs
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
                vma_iter_store(vmi, vp->insert);
                mm->map_count++;
        }

        if (vp->anon_vma) {
                anon_vma_interval_tree_post_update_vma(vp->vma);
                if (vp->adj_next)
                        anon_vma_interval_tree_post_update_vma(vp->adj_next);
                anon_vma_unlock_write(vp->anon_vma);
        }

        if (vp->file) {
                i_mmap_unlock_write(vp->mapping);
                uprobe_mmap(vp->vma);

                if (vp->adj_next)
                        uprobe_mmap(vp->adj_next);
        }

        if (vp->remove) {
again:
                vma_mark_detached(vp->remove, true);
                if (vp->file) {
                        uprobe_munmap(vp->remove, vp->remove->vm_start,
                                      vp->remove->vm_end);
                        fput(vp->file);
                }
                if (vp->remove->anon_vma)
                        anon_vma_merge(vp->vma, vp->remove);
                mm->map_count--;
                mpol_put(vma_policy(vp->remove));
                if (!vp->remove2)
                        WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
                vm_area_free(vp->remove);

                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we are removing both mid and next vmas
                 */
                if (vp->remove2) {
                        vp->remove = vp->remove2;
                        vp->remove2 = NULL;
                        goto again;
                }
        }
        if (vp->insert && vp->file)
                uprobe_mmap(vp->insert);
        validate_mm(mm);
}

/*
 * dup_anon_vma() - Helper function to duplicate anon_vma
 * @dst: The destination VMA
 * @src: The source VMA
 * @dup: Pointer to the destination VMA when successful.
 *
 * Returns: 0 on success.
 */
static inline int dup_anon_vma(struct vm_area_struct *dst,
                struct vm_area_struct *src, struct vm_area_struct **dup)
{
        /*
         * Easily overlooked: when mprotect shifts the boundary, make sure the
         * expanding vma has anon_vma set if the shrinking vma had, to cover any
         * anon pages imported.
         */
        if (src->anon_vma && !dst->anon_vma) {
                int ret;

                vma_assert_write_locked(dst);
                dst->anon_vma = src->anon_vma;
                ret = anon_vma_clone(dst, src);
                if (ret)
                        return ret;

                *dup = dst;
        }

        return 0;
}

/*
 * vma_expand - Expand an existing VMA
 *
 * @vmi: The vma iterator
 * @vma: The vma to expand
 * @start: The start of the vma
 * @end: The exclusive end of the vma
 * @pgoff: The page offset of vma
 * @next: The vma following @vma, which may be expanded over.
 *
 * Expand @vma to @start and @end.  Can expand off the start and end.  Will
 * expand over @next if it's different from @vma and @end == @next->vm_end.
 * Checking if the @vma can expand and merge with @next needs to be handled by
 * the caller.
 *
 * Returns: 0 on success
 */
int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
               unsigned long start, unsigned long end, pgoff_t pgoff,
               struct vm_area_struct *next)
{
        struct vm_area_struct *anon_dup = NULL;
        bool remove_next = false;
        struct vma_prepare vp;

        vma_start_write(vma);
        if (next && (vma != next) && (end == next->vm_end)) {
                int ret;

                remove_next = true;
                vma_start_write(next);
                ret = dup_anon_vma(vma, next, &anon_dup);
                if (ret)
                        return ret;
        }

        init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
        /* Not merging but overwriting any part of next is not handled. */
        VM_WARN_ON(next && !vp.remove &&
                  next != vma && end > next->vm_start);
        /* Only handles expanding */
        VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);

        /* Note: vma iterator must be pointing to 'start' */
        vma_iter_config(vmi, start, end);
        if (vma_iter_prealloc(vmi, vma))
                goto nomem;

        vma_prepare(&vp);
        vma_adjust_trans_huge(vma, start, end, 0);
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
        vma_iter_store(vmi, vma);

        vma_complete(&vp, vmi, vma->vm_mm);
        return 0;

nomem:
        if (anon_dup)
                unlink_anon_vmas(anon_dup);
        return -ENOMEM;
}

/*
 * vma_shrink() - Reduce an existing VMA's memory area
 * @vmi: The vma iterator
 * @vma: The VMA to modify
 * @start: The new start
 * @end: The new end
 * @pgoff: The new page offset
 *
 * Returns: 0 on success, -ENOMEM otherwise
 */
int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
               unsigned long start, unsigned long end, pgoff_t pgoff)
{
        struct vma_prepare vp;

        WARN_ON((vma->vm_start != start) && (vma->vm_end != end));

        if (vma->vm_start < start)
                vma_iter_config(vmi, vma->vm_start, start);
        else
                vma_iter_config(vmi, end, vma->vm_end);

        if (vma_iter_prealloc(vmi, NULL))
                return -ENOMEM;

        vma_start_write(vma);

        init_vma_prep(&vp, vma);
        vma_prepare(&vp);
        vma_adjust_trans_huge(vma, start, end, 0);

        vma_iter_clear(vmi);
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
        vma_complete(&vp, vmi, vma->vm_mm);
        return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those if the caller indicates
 * the current vma may be removed as part of the merge.
 */
static inline bool is_mergeable_vma(struct vm_area_struct *vma,
                struct file *file, unsigned long vm_flags,
                struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
                struct anon_vma_name *anon_name, bool may_remove_vma)
{
        /*
         * VM_SOFTDIRTY should not prevent VMA merging if we match the flags
         * except for the dirty bit -- the caller should mark the merged VMA
         * as dirty. If the dirty bit were not excluded from the comparison,
         * we would increase pressure on the memory system by forcing the
         * kernel to generate new VMAs where old ones could be extended
         * instead.
         */
        if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
                return false;
        if (vma->vm_file != file)
                return false;
        if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
                return false;
        if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
                return false;
        if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
                return false;
        return true;
}

static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
                 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
{
        /*
         * The list_is_singular() test is to avoid merging VMAs cloned from
         * parents. This can improve scalability by reducing contention on
         * the anon_vma lock.
         */
        if ((!anon_vma1 || !anon_vma2) && (!vma ||
                list_is_singular(&vma->anon_vma_chain)))
                return true;
        return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 *
 * We assume the vma may be removed as part of the merge.
 */
static bool
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
                struct anon_vma *anon_vma, struct file *file,
                pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
                struct anon_vma_name *anon_name)
{
        if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return true;
        }
        return false;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We assume that vma is not removed as part of the merge.
 */
static bool
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
                struct anon_vma *anon_vma, struct file *file,
                pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
                struct anon_vma_name *anon_name)
{
        if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = vma_pages(vma);
                if (vma->vm_pgoff + vm_pglen == vm_pgoff)
                        return true;
        }
        return false;
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 * figure out whether that can be merged with its predecessor or its
 * successor.  Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where **** is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
 * at the same address as **** and is of the same or larger span, and
 * NNNN the next vma after ****:
 *
 *     ****             ****                   ****
 *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
 *    cannot merge    might become       might become
 *                    PPNNNNNNNNNN       PPPPPPPPPPCC
 *    mmap, brk or    case 4 below       case 5 below
 *    mremap move:
 *                        ****               ****
 *                    PPPP    NNNN       PPPPCCCCNNNN
 *                    might become       might become
 *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
 *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
 *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
 *
 * It is important for case 8 that the vma CCCC overlapping the
 * region **** is never going to be extended over NNNN. Instead NNNN must
 * be extended in region **** and CCCC must be removed. This way in
 * all cases where vma_merge succeeds, the moment vma_merge drops the
 * rmap_locks, the properties of the merged vma will be already
 * correct for the whole merged range. Some of those properties like
 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
 * be correct for the whole merged range immediately after the
 * rmap_locks are released. Otherwise if NNNN would be removed and
 * CCCC would be extended over the NNNN range, remove_migration_ptes
 * or other rmap walkers (if working on addresses beyond the "end"
 * parameter) may establish ptes with the wrong permissions of CCCC
 * instead of the right permissions of NNNN.
 *
 * In the code below:
 * PPPP is represented by *prev
 * CCCC is represented by *curr or not represented at all (NULL)
 * NNNN is represented by *next or not represented at all (NULL)
 * **** is not represented - it will be merged and the vma containing the
 *      area is returned, or the function will return NULL
 */
struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy,
                        struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
                        struct anon_vma_name *anon_name)
{
        struct vm_area_struct *curr, *next, *res;
        struct vm_area_struct *vma, *adjust, *remove, *remove2;
        struct vm_area_struct *anon_dup = NULL;
        struct vma_prepare vp;
        pgoff_t vma_pgoff;
        int err = 0;
        bool merge_prev = false;
        bool merge_next = false;
        bool vma_expanded = false;
        unsigned long vma_start = addr;
        unsigned long vma_end = end;
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        long adj_start = 0;

        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
         */
        if (vm_flags & VM_SPECIAL)
                return NULL;

        /* Does the input range span an existing VMA? (cases 5 - 8) */
        curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);

        if (!curr ||                    /* cases 1 - 4 */
            end == curr->vm_end)        /* cases 6 - 8, adjacent VMA */
                next = vma_lookup(mm, end);
        else
                next = NULL;            /* case 5 */

        if (prev) {
                vma_start = prev->vm_start;
                vma_pgoff = prev->vm_pgoff;

                /* Can we merge the predecessor? */
                if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
                    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
                                           pgoff, vm_userfaultfd_ctx, anon_name)) {
                        merge_prev = true;
                        vma_prev(vmi);
                }
        }

        /* Can we merge the successor? */
        if (next && mpol_equal(policy, vma_policy(next)) &&
            can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
                                 vm_userfaultfd_ctx, anon_name)) {
                merge_next = true;
        }

        /* Verify some invariants that must be enforced by the caller. */
        VM_WARN_ON(prev && addr <= prev->vm_start);
        VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
        VM_WARN_ON(addr >= end);

        if (!merge_prev && !merge_next)
                return NULL; /* Not mergeable. */

        if (merge_prev)
                vma_start_write(prev);

        res = vma = prev;
        remove = remove2 = adjust = NULL;

        /* Can we merge both the predecessor and the successor? */
        if (merge_prev && merge_next &&
            is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
                vma_start_write(next);
                remove = next;                          /* case 1 */
                vma_end = next->vm_end;
                err = dup_anon_vma(prev, next, &anon_dup);
                if (curr) {                             /* case 6 */
                        vma_start_write(curr);
                        remove = curr;
                        remove2 = next;
                        if (!next->anon_vma)
                                err = dup_anon_vma(prev, curr, &anon_dup);
                }
        } else if (merge_prev) {                        /* case 2 */
                if (curr) {
                        vma_start_write(curr);
                        err = dup_anon_vma(prev, curr, &anon_dup);
                        if (end == curr->vm_end) {      /* case 7 */
                                remove = curr;
                        } else {                        /* case 5 */
                                adjust = curr;
                                adj_start = (end - curr->vm_start);
                        }
                }
        } else { /* merge_next */
                vma_start_write(next);
                res = next;
                if (prev && addr < prev->vm_end) {      /* case 4 */
                        vma_start_write(prev);
                        vma_end = addr;
                        adjust = next;
                        adj_start = -(prev->vm_end - addr);
                        err = dup_anon_vma(next, prev, &anon_dup);
                } else {
                        /*
                         * Note that cases 3 and 8 are the ONLY ones where prev
                         * is permitted to be (but is not necessarily) NULL.
                         */
                        vma = next;                     /* case 3 */
                        vma_start = addr;
                        vma_end = next->vm_end;
                        vma_pgoff = next->vm_pgoff - pglen;
                        if (curr) {                     /* case 8 */
                                vma_pgoff = curr->vm_pgoff;
                                vma_start_write(curr);
                                remove = curr;
                                err = dup_anon_vma(next, curr, &anon_dup);
                        }
                }
        }

        /* Error in anon_vma clone. */
        if (err)
                goto anon_vma_fail;

        if (vma_start < vma->vm_start || vma_end > vma->vm_end)
                vma_expanded = true;

        if (vma_expanded) {
                vma_iter_config(vmi, vma_start, vma_end);
        } else {
                vma_iter_config(vmi, adjust->vm_start + adj_start,
                                adjust->vm_end);
        }

        if (vma_iter_prealloc(vmi, vma))
                goto prealloc_fail;

        init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
        VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
                   vp.anon_vma != adjust->anon_vma);

        vma_prepare(&vp);
        vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);

        vma->vm_start = vma_start;
        vma->vm_end = vma_end;
        vma->vm_pgoff = vma_pgoff;

        if (vma_expanded)
                vma_iter_store(vmi, vma);

        if (adj_start) {
                adjust->vm_start += adj_start;
                adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
                if (adj_start < 0) {
                        WARN_ON(vma_expanded);
                        vma_iter_store(vmi, next);
                }
        }

        vma_complete(&vp, vmi, mm);
        khugepaged_enter_vma(res, vm_flags);
        return res;

prealloc_fail:
        if (anon_dup)
                unlink_anon_vmas(anon_dup);

anon_vma_fail:
        vma_iter_set(vmi, addr);
        vma_iter_load(vmi);
        return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
        return a->vm_end == b->vm_start &&
                mpol_equal(vma_policy(a), vma_policy(b)) &&
                a->vm_file == b->vm_file &&
                !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
                b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_lock held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that READ_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_lock.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
        if (anon_vma_compatible(a, b)) {
                struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);

                if (anon_vma && list_is_singular(&old->anon_vma_chain))
                        return anon_vma;
        }
        return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
        MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
        struct anon_vma *anon_vma = NULL;
        struct vm_area_struct *prev, *next;

        /* Try next first. */
        next = mas_walk(&mas);
        if (next) {
                anon_vma = reusable_anon_vma(next, vma, next);
                if (anon_vma)
                        return anon_vma;
        }

        prev = mas_prev(&mas, 0);
        VM_BUG_ON_VMA(prev != vma, vma);
        prev = mas_prev(&mas, 0);
        /* Then try prev. */
        if (prev)
                anon_vma = reusable_anon_vma(prev, prev, vma);

        /*
         * We might reach here with anon_vma == NULL if we can't find
         * any reusable anon_vma.
         * There's no absolute need to look only at touching neighbours:
         * we could search further afield for "compatible" anon_vmas.
         * But it would probably just be a waste of time searching,
         * or lead to too many vmas hanging off the same anon_vma.
         * We're trying to allow mprotect remerging later on,
         * not trying to minimize memory used for anon_vmas.
         */
        return anon_vma;
}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
        hint &= PAGE_MASK;
        if (((void *)hint != NULL) &&
            (hint < mmap_min_addr))
                return PAGE_ALIGN(mmap_min_addr);
        return hint;
}

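/*
 * mlock_future_ok() - would locking @bytes more bytes still fit within
 * RLIMIT_MEMLOCK? Trivially true when the mapping is not VM_LOCKED or the
 * caller has CAP_IPC_LOCK.
 */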
bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
                        unsigned long bytes)
{
        unsigned long locked_pages, limit_pages;

        if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
                return true;

        locked_pages = bytes >> PAGE_SHIFT;
        locked_pages += mm->locked_vm;

        limit_pages = rlimit(RLIMIT_MEMLOCK);
        limit_pages >>= PAGE_SHIFT;

        return locked_pages <= limit_pages;
}

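/* Maximum size a file mapping may have, by inode type; 0 means no limit. */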
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
        if (S_ISREG(inode->i_mode))
                return MAX_LFS_FILESIZE;

        if (S_ISBLK(inode->i_mode))
                return MAX_LFS_FILESIZE;

        if (S_ISSOCK(inode->i_mode))
                return MAX_LFS_FILESIZE;

        /* Special "we do even unsigned file positions" case */
        if (file->f_mode & FMODE_UNSIGNED_OFFSET)
                return 0;

        /* Yes, random drivers might want more. But I'm tired of buggy drivers */
        return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
                                unsigned long pgoff, unsigned long len)
{
        u64 maxsize = file_mmap_size_max(file, inode);

        if (maxsize && len > maxsize)
                return false;
        maxsize -= len;
        if (pgoff > maxsize >> PAGE_SHIFT)
                return false;
        return true;
}

/*
 * The caller must write-lock current->mm->mmap_lock.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, vm_flags_t vm_flags,
                        unsigned long pgoff, unsigned long *populate,
                        struct list_head *uf)
{
        struct mm_struct *mm = current->mm;
        int pkey = 0;

        *populate = 0;

        if (!len)
                return -EINVAL;

        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
         * (the exception is when the underlying filesystem is noexec
         *  mounted, in which case we don't add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                if (!(file && path_noexec(&file->f_path)))
                        prot |= PROT_EXEC;

        /* force arch specific MAP_FIXED handling in get_unmapped_area */
        if (flags & MAP_FIXED_NOREPLACE)
                flags |= MAP_FIXED;

        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);

        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        /* Too many mappings? */
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;

        /* Obtain the address to map to. We verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        addr = get_unmapped_area(file, addr, len, pgoff, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        if (flags & MAP_FIXED_NOREPLACE) {
                if (find_vma_intersection(mm, addr, addr + len))
                        return -EEXIST;
        }

        if (prot == PROT_EXEC) {
                pkey = execute_only_pkey(mm);
                if (pkey < 0)
                        pkey = 0;
        }

        /* Do simple checking here so the lower-level routines won't have
         * to. We assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

        if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;

        if (!mlock_future_ok(mm, vm_flags, len))
                return -EAGAIN;

        if (file) {
                struct inode *inode = file_inode(file);
                unsigned long flags_mask;

                if (!file_mmap_ok(file, inode, pgoff, len))
                        return -EOVERFLOW;

                flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;

                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        /*
                         * Force use of MAP_SHARED_VALIDATE with non-legacy
                         * flags. E.g. MAP_SYNC is dangerous to use with
                         * MAP_SHARED as you don't know which consistency model
                         * you will get. We silently ignore unsupported flags
                         * with MAP_SHARED to preserve backward compatibility.
                         */
                        flags &= LEGACY_MAP_MASK;
                        fallthrough;
                case MAP_SHARED_VALIDATE:
                        if (flags & ~flags_mask)
                                return -EOPNOTSUPP;
                        if (prot & PROT_WRITE) {
                                if (!(file->f_mode & FMODE_WRITE))
                                        return -EACCES;
                                if (IS_SWAPFILE(file->f_mapping->host))
                                        return -ETXTBSY;
                        }

                        /*
                         * Make sure we don't allow writing to an append-only
                         * file..
                         */
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
                        fallthrough;
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
                        if (path_noexec(&file->f_path)) {
                                if (vm_flags & VM_EXEC)
                                        return -EPERM;
                                vm_flags &= ~VM_MAYEXEC;
                        }

                        if (!file->f_op->mmap)
                                return -ENODEV;
                        if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                                return -EINVAL;
                        break;

                default:
                        return -EINVAL;
                }
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                                return -EINVAL;
                        /*
                         * Ignore pgoff.
                         */
                        pgoff = 0;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
                        /*
                         * Set pgoff according to addr for anon_vma.
                         */
                        pgoff = addr >> PAGE_SHIFT;
                        break;
                default:
                        return -EINVAL;
                }
        }

        /*
         * Set 'VM_NORESERVE' if we should not account for the
         * memory use of this mapping.
         */
        if (flags & MAP_NORESERVE) {
                /* We honor MAP_NORESERVE if allowed to overcommit */
                if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        vm_flags |= VM_NORESERVE;

                /* hugetlb applies strict overcommit unless MAP_NORESERVE */
                if (file && is_file_hugepages(file))
                        vm_flags |= VM_NORESERVE;
        }

        addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
        if (!IS_ERR_VALUE(addr) &&
            ((vm_flags & VM_LOCKED) ||
             (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
                *populate = len;
        return addr;
}

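/*
 * ksys_mmap_pgoff() - common entry point for the mmap syscalls: resolve @fd
 * to a file (or set up a hugetlb pseudo-file for anonymous MAP_HUGETLB),
 * then hand off to vm_mmap_pgoff().
 */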
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
                              unsigned long prot, unsigned long flags,
                              unsigned long fd, unsigned long pgoff)
{
        struct file *file = NULL;
        unsigned long retval;

        if (!(flags & MAP_ANONYMOUS)) {
                audit_mmap_fd(fd, flags);
                file = fget(fd);
                if (!file)
                        return -EBADF;
                if (is_file_hugepages(file)) {
                        len = ALIGN(len, huge_page_size(hstate_file(file)));
                } else if (unlikely(flags & MAP_HUGETLB)) {
                        retval = -EINVAL;
                        goto out_fput;
                }
        } else if (flags & MAP_HUGETLB) {
                struct hstate *hs;

                hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
                if (!hs)
                        return -EINVAL;

                len = ALIGN(len, huge_page_size(hs));
                /*
                 * VM_NORESERVE is used because the reservations will be
                 * taken when vm_ops->mmap() is called
                 */
                file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
                                VM_NORESERVE,
                                HUGETLB_ANONHUGE_INODE,
                                (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }

        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
        if (file)
                fput(file);
        return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
{
        return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
        struct mmap_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        if (offset_in_page(a.offset))
                return -EINVAL;

        return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                               a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

1458 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1459 {
1460         return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1461 }
1462
1463 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1464 {
1465         return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1466                 (VM_WRITE | VM_SHARED);
1467 }
1468
1469 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1470 {
1471         /* No managed pages to writeback. */
1472         if (vma->vm_flags & VM_PFNMAP)
1473                 return false;
1474
1475         return vma->vm_file && vma->vm_file->f_mapping &&
1476                 mapping_can_writeback(vma->vm_file->f_mapping);
1477 }
1478
1479 /*
1480  * Does this VMA require the underlying folios to have their dirty state
1481  * tracked?
1482  */
1483 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1484 {
1485         /* Only shared, writable VMAs require dirty tracking. */
1486         if (!vma_is_shared_writable(vma))
1487                 return false;
1488
1489         /* Does the filesystem need to be notified? */
1490         if (vm_ops_needs_writenotify(vma->vm_ops))
1491                 return true;
1492
1493         /*
1494          * Even if the filesystem doesn't indicate a need for writenotify, if it
1495          * can writeback, dirty tracking is still required.
1496          */
1497         return vma_fs_can_writeback(vma);
1498 }
1499
1500 /*
1501  * Some shared mappings will want the pages marked read-only
1502  * to track write events. If so, we'll downgrade vm_page_prot
1503  * to the private version (using protection_map[] without the
1504  * VM_SHARED bit).
1505  */
1506 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1507 {
1508         /* If it was private or non-writable, the write bit is already clear */
1509         if (!vma_is_shared_writable(vma))
1510                 return 0;
1511
1512         /* The backer wishes to know when pages are first written to? */
1513         if (vm_ops_needs_writenotify(vma->vm_ops))
1514                 return 1;
1515
1516         /* The open routine did something to the protections that pgprot_modify
1517          * won't preserve? */
1518         if (pgprot_val(vm_page_prot) !=
1519             pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1520                 return 0;
1521
1522         /*
1523          * Do we need to track softdirty? hugetlb does not support softdirty
1524          * tracking yet.
1525          */
1526         if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1527                 return 1;
1528
1529         /* Do we need write faults for uffd-wp tracking? */
1530         if (userfaultfd_wp(vma))
1531                 return 1;
1532
1533         /* Can the mapping track the dirty pages? */
1534         return vma_fs_can_writeback(vma);
1535 }
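
/*
 * Hedged sketch (all names below are hypothetical): a driver or
 * filesystem opts in to write notification simply by providing
 * ->page_mkwrite (or ->pfn_mkwrite) in its vm_operations_struct.
 * vma_wants_writenotify() above then makes shared-writable PTEs start
 * out read-only, so the first write to each page faults into the
 * callback.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
        struct folio *folio = page_folio(vmf->page);

        folio_lock(folio);
        /* A real implementation would dirty the folio / reserve space here. */
        return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct example_vm_ops = {
        .fault          = filemap_fault,        /* normal read-fault path */
        .page_mkwrite   = example_page_mkwrite, /* enables writenotify above */
};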
1536
1537 /*
1538  * We account for memory if it's a private writable mapping,
1539  * not hugepages and VM_NORESERVE wasn't set.
1540  */
1541 static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1542 {
1543         /*
1544          * hugetlb has its own accounting separate from the core VM;
1545          * VM_HUGETLB may not be set yet so we cannot check for that flag.
1546          */
1547         if (file && is_file_hugepages(file))
1548                 return 0;
1549
1550         return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1551 }
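
/*
 * Worked examples of the check above (assuming no hugetlb file, and that
 * MAP_NORESERVE was honoured by the overcommit policy):
 *
 *      MAP_PRIVATE | PROT_WRITE                 -> VM_WRITE alone: accounted
 *      MAP_SHARED  | PROT_WRITE                 -> VM_SHARED set:  not accounted
 *      MAP_PRIVATE | PROT_READ                  -> no VM_WRITE:    not accounted
 *      MAP_PRIVATE | PROT_WRITE | MAP_NORESERVE -> VM_NORESERVE:   not accounted
 *
 * Only private writable mappings, which can instantiate new anonymous
 * pages via COW, are charged against the overcommit limits.
 */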
1552
1553 /**
1554  * unmapped_area() - Find an area between the low_limit and the high_limit with
1555  * the correct alignment and offset, all from @info. Note: current->mm is used
1556  * for the search.
1557  *
1558  * @info: The unmapped area information including the range [low_limit,
1559  * high_limit), the alignment offset and mask.
1560  *
1561  * Return: A memory address or -ENOMEM.
1562  */
1563 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1564 {
1565         unsigned long length, gap;
1566         unsigned long low_limit, high_limit;
1567         struct vm_area_struct *tmp;
1568
1569         MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1570
1571         /* Adjust search length to account for worst case alignment overhead */
1572         length = info->length + info->align_mask;
1573         if (length < info->length)
1574                 return -ENOMEM;
1575
1576         low_limit = info->low_limit;
1577         if (low_limit < mmap_min_addr)
1578                 low_limit = mmap_min_addr;
1579         high_limit = info->high_limit;
1580 retry:
1581         if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
1582                 return -ENOMEM;
1583
1584         gap = mas.index;
1585         gap += (info->align_offset - gap) & info->align_mask;
1586         tmp = mas_next(&mas, ULONG_MAX);
1587         if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1588                 if (vm_start_gap(tmp) < gap + length - 1) {
1589                         low_limit = tmp->vm_end;
1590                         mas_reset(&mas);
1591                         goto retry;
1592                 }
1593         } else {
1594                 tmp = mas_prev(&mas, 0);
1595                 if (tmp && vm_end_gap(tmp) > gap) {
1596                         low_limit = vm_end_gap(tmp);
1597                         mas_reset(&mas);
1598                         goto retry;
1599                 }
1600         }
1601
1602         return gap;
1603 }
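
/*
 * Worked example of the alignment step above, with illustrative numbers:
 * for align_mask = 0xffff (64 KiB - 1), align_offset = 0x3000 and a gap
 * found at 0x105000:
 *
 *      gap += (0x3000 - 0x105000) & 0xffff;    // adds 0xe000
 *      // gap == 0x113000, and 0x113000 & 0xffff == 0x3000
 *
 * i.e. the lowest address inside the gap whose low bits equal
 * align_offset.  unmapped_area_topdown() below rounds down instead:
 *
 *      gap -= (gap - 0x3000) & 0xffff;
 */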
1604
1605 /**
1606  * unmapped_area_topdown() - Find an area between the low_limit and the
1607  * high_limit with the correct alignment and offset at the highest available
1608  * address, all from @info. Note: current->mm is used for the search.
1609  *
1610  * @info: The unmapped area information including the range [low_limit,
1611  * high_limit), the alignment offset and mask.
1612  *
1613  * Return: A memory address or -ENOMEM.
1614  */
1615 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1616 {
1617         unsigned long length, gap, gap_end;
1618         unsigned long low_limit, high_limit;
1619         struct vm_area_struct *tmp;
1620
1621         MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1622         /* Adjust search length to account for worst case alignment overhead */
1623         length = info->length + info->align_mask;
1624         if (length < info->length)
1625                 return -ENOMEM;
1626
1627         low_limit = info->low_limit;
1628         if (low_limit < mmap_min_addr)
1629                 low_limit = mmap_min_addr;
1630         high_limit = info->high_limit;
1631 retry:
1632         if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
1633                 return -ENOMEM;
1634
1635         gap = mas.last + 1 - info->length;
1636         gap -= (gap - info->align_offset) & info->align_mask;
1637         gap_end = mas.last;
1638         tmp = mas_next(&mas, ULONG_MAX);
1639         if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1640                 if (vm_start_gap(tmp) <= gap_end) {
1641                         high_limit = vm_start_gap(tmp);
1642                         mas_reset(&mas);
1643                         goto retry;
1644                 }
1645         } else {
1646                 tmp = mas_prev(&mas, 0);
1647                 if (tmp && vm_end_gap(tmp) > gap) {
1648                         high_limit = tmp->vm_start;
1649                         mas_reset(&mas);
1650                         goto retry;
1651                 }
1652         }
1653
1654         return gap;
1655 }
1656
1657 /*
1658  * Search for an unmapped address range.
1659  *
1660  * We are looking for a range that:
1661  * - does not intersect with any VMA;
1662  * - is contained within the [low_limit, high_limit) interval;
1663  * - is at least the desired size;
1664  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
1665  */
1666 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
1667 {
1668         unsigned long addr;
1669
1670         if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
1671                 addr = unmapped_area_topdown(info);
1672         else
1673                 addr = unmapped_area(info);
1674
1675         trace_vm_unmapped_area(addr, info);
1676         return addr;
1677 }
1678
1679 /* Get an address range which is currently unmapped.
1680  * For shmat() with addr=0.
1681  *
1682  * Ugly calling convention alert:
1683  * Return value with the low bits set means error value,
1684  * ie
1685  *      if (ret & ~PAGE_MASK)
1686  *              error = ret;
1687  *
1688  * This function "knows" that -ENOMEM has the bits set.
1689  */
1690 unsigned long
1691 generic_get_unmapped_area(struct file *filp, unsigned long addr,
1692                           unsigned long len, unsigned long pgoff,
1693                           unsigned long flags)
1694 {
1695         struct mm_struct *mm = current->mm;
1696         struct vm_area_struct *vma, *prev;
1697         struct vm_unmapped_area_info info;
1698         const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1699
1700         if (len > mmap_end - mmap_min_addr)
1701                 return -ENOMEM;
1702
1703         if (flags & MAP_FIXED)
1704                 return addr;
1705
1706         if (addr) {
1707                 addr = PAGE_ALIGN(addr);
1708                 vma = find_vma_prev(mm, addr, &prev);
1709                 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1710                     (!vma || addr + len <= vm_start_gap(vma)) &&
1711                     (!prev || addr >= vm_end_gap(prev)))
1712                         return addr;
1713         }
1714
1715         info.flags = 0;
1716         info.length = len;
1717         info.low_limit = mm->mmap_base;
1718         info.high_limit = mmap_end;
1719         info.align_mask = 0;
1720         info.align_offset = 0;
1721         return vm_unmapped_area(&info);
1722 }
1723
1724 #ifndef HAVE_ARCH_UNMAPPED_AREA
1725 unsigned long
1726 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1727                        unsigned long len, unsigned long pgoff,
1728                        unsigned long flags)
1729 {
1730         return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
1731 }
1732 #endif
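
/*
 * Hedged sketch (hypothetical, not part of this file): an architecture
 * that needs shared mappings aligned to, say, a 16 KiB cache-colour
 * boundary would define HAVE_ARCH_UNMAPPED_AREA and feed the constraint
 * to vm_unmapped_area() instead of scanning VMAs itself.
 */
unsigned long example_arch_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        struct vm_unmapped_area_info info;

        if (flags & MAP_FIXED)
                return addr;    /* caller asked for exactly this address */

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = SZ_16K - 1;   /* hypothetical colour granule */
        info.align_offset = (pgoff << PAGE_SHIFT) & (SZ_16K - 1);
        return vm_unmapped_area(&info);
}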
1733
1734 /*
1735  * This mmap-allocator allocates new areas top-down from below the
1736  * stack's low limit (the base):
1737  */
1738 unsigned long
1739 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1740                                   unsigned long len, unsigned long pgoff,
1741                                   unsigned long flags)
1742 {
1743         struct vm_area_struct *vma, *prev;
1744         struct mm_struct *mm = current->mm;
1745         struct vm_unmapped_area_info info;
1746         const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1747
1748         /* requested length too big for entire address space */
1749         if (len > mmap_end - mmap_min_addr)
1750                 return -ENOMEM;
1751
1752         if (flags & MAP_FIXED)
1753                 return addr;
1754
1755         /* requesting a specific address */
1756         if (addr) {
1757                 addr = PAGE_ALIGN(addr);
1758                 vma = find_vma_prev(mm, addr, &prev);
1759                 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1760                                 (!vma || addr + len <= vm_start_gap(vma)) &&
1761                                 (!prev || addr >= vm_end_gap(prev)))
1762                         return addr;
1763         }
1764
1765         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1766         info.length = len;
1767         info.low_limit = PAGE_SIZE;
1768         info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
1769         info.align_mask = 0;
1770         info.align_offset = 0;
1771         addr = vm_unmapped_area(&info);
1772
1773         /*
1774          * A failed mmap() very likely causes application failure,
1775          * so fall back to the bottom-up function here. This scenario
1776          * can happen with large stack limits and large mmap()
1777          * allocations.
1778          */
1779         if (offset_in_page(addr)) {
1780                 VM_BUG_ON(addr != -ENOMEM);
1781                 info.flags = 0;
1782                 info.low_limit = TASK_UNMAPPED_BASE;
1783                 info.high_limit = mmap_end;
1784                 addr = vm_unmapped_area(&info);
1785         }
1786
1787         return addr;
1788 }
1789
1790 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1791 unsigned long
1792 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1793                                unsigned long len, unsigned long pgoff,
1794                                unsigned long flags)
1795 {
1796         return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
1797 }
1798 #endif
1799
1800 unsigned long
1801 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1802                 unsigned long pgoff, unsigned long flags)
1803 {
1804         unsigned long (*get_area)(struct file *, unsigned long,
1805                                   unsigned long, unsigned long, unsigned long);
1806
1807         unsigned long error = arch_mmap_check(addr, len, flags);
1808         if (error)
1809                 return error;
1810
1811         /* Careful about overflows.. */
1812         if (len > TASK_SIZE)
1813                 return -ENOMEM;
1814
1815         get_area = current->mm->get_unmapped_area;
1816         if (file) {
1817                 if (file->f_op->get_unmapped_area)
1818                         get_area = file->f_op->get_unmapped_area;
1819         } else if (flags & MAP_SHARED) {
1820                 /*
1821                  * mmap_region() will call shmem_zero_setup() to create a file,
1822                  * so use shmem's get_unmapped_area in case it can be huge.
1823                  * do_mmap() will clear pgoff, so match alignment.
1824                  */
1825                 pgoff = 0;
1826                 get_area = shmem_get_unmapped_area;
1827         }
1828
1829         addr = get_area(file, addr, len, pgoff, flags);
1830         if (IS_ERR_VALUE(addr))
1831                 return addr;
1832
1833         if (addr > TASK_SIZE - len)
1834                 return -ENOMEM;
1835         if (offset_in_page(addr))
1836                 return -EINVAL;
1837
1838         error = security_mmap_addr(addr);
1839         return error ? error : addr;
1840 }
1841
1842 EXPORT_SYMBOL(get_unmapped_area);
1843
1844 /**
1845  * find_vma_intersection() - Look up the first VMA which intersects the interval
1846  * @mm: The process address space.
1847  * @start_addr: The inclusive start user address.
1848  * @end_addr: The exclusive end user address.
1849  *
1850  * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
1851  * start_addr < end_addr.
1852  */
1853 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1854                                              unsigned long start_addr,
1855                                              unsigned long end_addr)
1856 {
1857         unsigned long index = start_addr;
1858
1859         mmap_assert_locked(mm);
1860         return mt_find(&mm->mm_mt, &index, end_addr - 1);
1861 }
1862 EXPORT_SYMBOL(find_vma_intersection);
1863
1864 /**
1865  * find_vma() - Find the VMA for a given address, or the next VMA.
1866  * @mm: The mm_struct to check
1867  * @addr: The address
1868  *
1869  * Returns: The VMA associated with addr, or the next VMA.
1870  * May return %NULL in the case of no VMA at addr or above.
1871  */
1872 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1873 {
1874         unsigned long index = addr;
1875
1876         mmap_assert_locked(mm);
1877         return mt_find(&mm->mm_mt, &index, ULONG_MAX);
1878 }
1879 EXPORT_SYMBOL(find_vma);
1880
1881 /**
1882  * find_vma_prev() - Find the VMA for a given address, or the next vma and
1883  * set %pprev to the previous VMA, if any.
1884  * @mm: The mm_struct to check
1885  * @addr: The address
1886  * @pprev: The pointer to set to the previous VMA
1887  *
1888  * Note that the RCU lock is not taken here; the caller's mmap_lock
1889  * serializes the lookup instead.
1890  *
1891  * Returns: The VMA associated with @addr, or the next vma.
1892  * May return %NULL in the case of no vma at addr or above.
1893  */
1894 struct vm_area_struct *
1895 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1896                         struct vm_area_struct **pprev)
1897 {
1898         struct vm_area_struct *vma;
1899         MA_STATE(mas, &mm->mm_mt, addr, addr);
1900
1901         vma = mas_walk(&mas);
1902         *pprev = mas_prev(&mas, 0);
1903         if (!vma)
1904                 vma = mas_next(&mas, ULONG_MAX);
1905         return vma;
1906 }
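
/*
 * Illustrative usage of the lookup helpers above (hedged sketch): all of
 * them require the mmap_lock to be held.  Note that find_vma() may
 * return a VMA that starts *above* @addr; vma_lookup() is the variant
 * that only matches a VMA containing the address.
 */
static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        bool mapped;

        mmap_read_lock(mm);
        vma = find_vma(mm, addr);
        mapped = vma && vma->vm_start <= addr;  /* filter "next VMA" hits */
        mmap_read_unlock(mm);

        return mapped;
}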
1907
1908 /*
1909  * Verify that the stack growth is acceptable and
1910  * update accounting. This is shared with both the
1911  * grow-up and grow-down cases.
1912  */
1913 static int acct_stack_growth(struct vm_area_struct *vma,
1914                              unsigned long size, unsigned long grow)
1915 {
1916         struct mm_struct *mm = vma->vm_mm;
1917         unsigned long new_start;
1918
1919         /* address space limit tests */
1920         if (!may_expand_vm(mm, vma->vm_flags, grow))
1921                 return -ENOMEM;
1922
1923         /* Stack limit test */
1924         if (size > rlimit(RLIMIT_STACK))
1925                 return -ENOMEM;
1926
1927         /* mlock limit tests */
1928         if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1929                 return -ENOMEM;
1930
1931         /* Check to ensure the stack will not grow into a hugetlb-only region */
1932         new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1933                         vma->vm_end - size;
1934         if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1935                 return -EFAULT;
1936
1937         /*
1938          * Overcommit..  This must be the final test, as it will
1939          * update security statistics.
1940          */
1941         if (security_vm_enough_memory_mm(mm, grow))
1942                 return -ENOMEM;
1943
1944         return 0;
1945 }
1946
1947 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1948 /*
1949  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1950  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1951  */
1952 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1953 {
1954         struct mm_struct *mm = vma->vm_mm;
1955         struct vm_area_struct *next;
1956         unsigned long gap_addr;
1957         int error = 0;
1958         MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);
1959
1960         if (!(vma->vm_flags & VM_GROWSUP))
1961                 return -EFAULT;
1962
1963         /* Guard against exceeding limits of the address space. */
1964         address &= PAGE_MASK;
1965         if (address >= (TASK_SIZE & PAGE_MASK))
1966                 return -ENOMEM;
1967         address += PAGE_SIZE;
1968
1969         /* Enforce stack_guard_gap */
1970         gap_addr = address + stack_guard_gap;
1971
1972         /* Guard against overflow */
1973         if (gap_addr < address || gap_addr > TASK_SIZE)
1974                 gap_addr = TASK_SIZE;
1975
1976         next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1977         if (next && vma_is_accessible(next)) {
1978                 if (!(next->vm_flags & VM_GROWSUP))
1979                         return -ENOMEM;
1980                 /* Check that both stack segments have the same anon_vma? */
1981         }
1982
1983         if (next)
1984                 mas_prev_range(&mas, address);
1985
1986         __mas_set_range(&mas, vma->vm_start, address - 1);
1987         if (mas_preallocate(&mas, vma, GFP_KERNEL))
1988                 return -ENOMEM;
1989
1990         /* We must make sure the anon_vma is allocated. */
1991         if (unlikely(anon_vma_prepare(vma))) {
1992                 mas_destroy(&mas);
1993                 return -ENOMEM;
1994         }
1995
1996         /* Lock the VMA before expanding to prevent concurrent page faults */
1997         vma_start_write(vma);
1998         /*
1999          * vma->vm_start/vm_end cannot change under us because the caller
2000          * is required to hold the mmap_lock in write mode.  We need the
2001          * anon_vma lock to serialize against concurrent expand_stacks.
2002          */
2003         anon_vma_lock_write(vma->anon_vma);
2004
2005         /* Somebody else might have raced and expanded it already */
2006         if (address > vma->vm_end) {
2007                 unsigned long size, grow;
2008
2009                 size = address - vma->vm_start;
2010                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2011
2012                 error = -ENOMEM;
2013                 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2014                         error = acct_stack_growth(vma, size, grow);
2015                         if (!error) {
2016                                 /*
2017                                          * We only hold the mmap_lock here, so
2018                                  * we need to protect against concurrent vma
2019                                  * expansions.  anon_vma_lock_write() doesn't
2020                                  * help here, as we don't guarantee that all
2021                                  * growable vmas in a mm share the same root
2022                                  * anon vma.  So, we reuse mm->page_table_lock
2023                                  * to guard against concurrent vma expansions.
2024                                  */
2025                                 spin_lock(&mm->page_table_lock);
2026                                 if (vma->vm_flags & VM_LOCKED)
2027                                         mm->locked_vm += grow;
2028                                 vm_stat_account(mm, vma->vm_flags, grow);
2029                                 anon_vma_interval_tree_pre_update_vma(vma);
2030                                 vma->vm_end = address;
2031                                 /* Overwrite old entry in mtree. */
2032                                 mas_store_prealloc(&mas, vma);
2033                                 anon_vma_interval_tree_post_update_vma(vma);
2034                                 spin_unlock(&mm->page_table_lock);
2035
2036                                 perf_event_mmap(vma);
2037                         }
2038                 }
2039         }
2040         anon_vma_unlock_write(vma->anon_vma);
2041         khugepaged_enter_vma(vma, vma->vm_flags);
2042         mas_destroy(&mas);
2043         validate_mm(mm);
2044         return error;
2045 }
2046 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2047
2048 /*
2049  * vma is the first one with address < vma->vm_start.  Have to extend vma.
2050  * mmap_lock held for writing.
2051  */
2052 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2053 {
2054         struct mm_struct *mm = vma->vm_mm;
2055         MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
2056         struct vm_area_struct *prev;
2057         int error = 0;
2058
2059         if (!(vma->vm_flags & VM_GROWSDOWN))
2060                 return -EFAULT;
2061
2062         address &= PAGE_MASK;
2063         if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2064                 return -EPERM;
2065
2066         /* Enforce stack_guard_gap */
2067         prev = mas_prev(&mas, 0);
2068         /* Check that both stack segments have the same anon_vma? */
2069         if (prev) {
2070                 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2071                     vma_is_accessible(prev) &&
2072                     (address - prev->vm_end < stack_guard_gap))
2073                         return -ENOMEM;
2074         }
2075
2076         if (prev)
2077                 mas_next_range(&mas, vma->vm_start);
2078
2079         __mas_set_range(&mas, address, vma->vm_end - 1);
2080         if (mas_preallocate(&mas, vma, GFP_KERNEL))
2081                 return -ENOMEM;
2082
2083         /* We must make sure the anon_vma is allocated. */
2084         if (unlikely(anon_vma_prepare(vma))) {
2085                 mas_destroy(&mas);
2086                 return -ENOMEM;
2087         }
2088
2089         /* Lock the VMA before expanding to prevent concurrent page faults */
2090         vma_start_write(vma);
2091         /*
2092          * vma->vm_start/vm_end cannot change under us because the caller
2093          * is required to hold the mmap_lock in write mode.  We need the
2094          * anon_vma lock to serialize against concurrent expand_stacks.
2095          */
2096         anon_vma_lock_write(vma->anon_vma);
2097
2098         /* Somebody else might have raced and expanded it already */
2099         if (address < vma->vm_start) {
2100                 unsigned long size, grow;
2101
2102                 size = vma->vm_end - address;
2103                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2104
2105                 error = -ENOMEM;
2106                 if (grow <= vma->vm_pgoff) {
2107                         error = acct_stack_growth(vma, size, grow);
2108                         if (!error) {
2109                                 /*
2110                                          * We only hold the mmap_lock here, so
2111                                  * we need to protect against concurrent vma
2112                                  * expansions.  anon_vma_lock_write() doesn't
2113                                  * help here, as we don't guarantee that all
2114                                  * growable vmas in a mm share the same root
2115                                  * anon vma.  So, we reuse mm->page_table_lock
2116                                  * to guard against concurrent vma expansions.
2117                                  */
2118                                 spin_lock(&mm->page_table_lock);
2119                                 if (vma->vm_flags & VM_LOCKED)
2120                                         mm->locked_vm += grow;
2121                                 vm_stat_account(mm, vma->vm_flags, grow);
2122                                 anon_vma_interval_tree_pre_update_vma(vma);
2123                                 vma->vm_start = address;
2124                                 vma->vm_pgoff -= grow;
2125                                 /* Overwrite old entry in mtree. */
2126                                 mas_store_prealloc(&mas, vma);
2127                                 anon_vma_interval_tree_post_update_vma(vma);
2128                                 spin_unlock(&mm->page_table_lock);
2129
2130                                 perf_event_mmap(vma);
2131                         }
2132                 }
2133         }
2134         anon_vma_unlock_write(vma->anon_vma);
2135         khugepaged_enter_vma(vma, vma->vm_flags);
2136         mas_destroy(&mas);
2137         validate_mm(mm);
2138         return error;
2139 }
2140
2141 /* enforced gap between the expanding stack and other mappings. */
2142 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2143
2144 static int __init cmdline_parse_stack_guard_gap(char *p)
2145 {
2146         unsigned long val;
2147         char *endptr;
2148
2149         val = simple_strtoul(p, &endptr, 10);
2150         if (!*endptr)
2151                 stack_guard_gap = val << PAGE_SHIFT;
2152
2153         return 1;
2154 }
2155 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
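
/*
 * Worked example: the parameter is in pages, so with 4 KiB pages the
 * default of 256 gives a 1 MiB guard gap; booting with
 * "stack_guard_gap=512" doubles that to 2 MiB, and "stack_guard_gap=1"
 * falls back to the single guard page of older kernels.
 */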
2156
2157 #ifdef CONFIG_STACK_GROWSUP
2158 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2159 {
2160         return expand_upwards(vma, address);
2161 }
2162
2163 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2164 {
2165         struct vm_area_struct *vma, *prev;
2166
2167         addr &= PAGE_MASK;
2168         vma = find_vma_prev(mm, addr, &prev);
2169         if (vma && (vma->vm_start <= addr))
2170                 return vma;
2171         if (!prev)
2172                 return NULL;
2173         if (expand_stack_locked(prev, addr))
2174                 return NULL;
2175         if (prev->vm_flags & VM_LOCKED)
2176                 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2177         return prev;
2178 }
2179 #else
2180 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2181 {
2182         if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
2183                 return -EINVAL;
2184         return expand_downwards(vma, address);
2185 }
2186
2187 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2188 {
2189         struct vm_area_struct *vma;
2190         unsigned long start;
2191
2192         addr &= PAGE_MASK;
2193         vma = find_vma(mm, addr);
2194         if (!vma)
2195                 return NULL;
2196         if (vma->vm_start <= addr)
2197                 return vma;
2198         start = vma->vm_start;
2199         if (expand_stack_locked(vma, addr))
2200                 return NULL;
2201         if (vma->vm_flags & VM_LOCKED)
2202                 populate_vma_page_range(vma, addr, start, NULL);
2203         return vma;
2204 }
2205 #endif
2206
2207 /*
2208  * IA64 has some horrid mapping rules: it can expand both up and down,
2209  * but with various special rules.
2210  *
2211  * We'll get rid of this architecture eventually, so the ugliness is
2212  * temporary.
2213  */
2214 #ifdef CONFIG_IA64
2215 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
2216 {
2217         return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
2218                 REGION_OFFSET(addr) < RGN_MAP_LIMIT;
2219 }
2220
2221 /*
2222  * IA64 stacks grow down, but there's a special register backing store
2223  * that can grow up. Only sequentially, though, so the new address must
2224  * match vm_end.
2225  */
2226 static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
2227 {
2228         if (!vma_expand_ok(vma, addr))
2229                 return -EFAULT;
2230         if (vma->vm_end != (addr & PAGE_MASK))
2231                 return -EFAULT;
2232         return expand_upwards(vma, addr);
2233 }
2234
2235 static inline int vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
2236 {
2237         if (!vma_expand_ok(vma, addr))
2238                 return -EFAULT;
2239         return expand_downwards(vma, addr);
2240 }
2241
2242 #elif defined(CONFIG_STACK_GROWSUP)
2243
2244 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
2245 #define vma_expand_down(vma, addr) (-EFAULT)
2246
2247 #else
2248
2249 #define vma_expand_up(vma,addr) (-EFAULT)
2250 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
2251
2252 #endif
2253
2254 /*
2255  * expand_stack(): legacy interface for page faulting. Don't use unless
2256  * you have to.
2257  *
2258  * This is called with the mm locked for reading, drops the lock, takes
2259  * the lock for writing, tries to look up a vma again, expands it if
2260  * necessary, and downgrades the lock to reading again.
2261  *
2262  * If no vma is found or it can't be expanded, it returns NULL and has
2263  * dropped the lock.
2264  */
2265 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
2266 {
2267         struct vm_area_struct *vma, *prev;
2268
2269         mmap_read_unlock(mm);
2270         if (mmap_write_lock_killable(mm))
2271                 return NULL;
2272
2273         vma = find_vma_prev(mm, addr, &prev);
2274         if (vma && vma->vm_start <= addr)
2275                 goto success;
2276
2277         if (prev && !vma_expand_up(prev, addr)) {
2278                 vma = prev;
2279                 goto success;
2280         }
2281
2282         if (vma && !vma_expand_down(vma, addr))
2283                 goto success;
2284
2285         mmap_write_unlock(mm);
2286         return NULL;
2287
2288 success:
2289         mmap_write_downgrade(mm);
2290         return vma;
2291 }
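
/*
 * Hedged sketch of the calling convention above (names illustrative):
 * enter with the mmap_lock held for reading; on failure the lock has
 * already been dropped, so the caller must not unlock again.  On
 * success the caller still holds the (downgraded) read lock and does
 * mmap_read_unlock() when done.
 */
static struct vm_area_struct *example_fault_find_vma(struct mm_struct *mm,
                                                     unsigned long addr)
{
        struct vm_area_struct *vma;

        mmap_read_lock(mm);
        vma = vma_lookup(mm, addr);
        if (vma)
                return vma;             /* fast path: already mapped */

        /* Maybe a stack that still has to grow down (or up) to addr. */
        vma = expand_stack(mm, addr);   /* NULL => mmap_lock already dropped */
        return vma;
}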
2292
2293 /*
2294  * OK - the memory areas we should free are on a maple tree, so release them
2295  * and do the vma updates.
2296  *
2297  * Called with the mm semaphore held.
2298  */
2299 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
2300 {
2301         unsigned long nr_accounted = 0;
2302         struct vm_area_struct *vma;
2303
2304         /* Update high watermark before we lower total_vm */
2305         update_hiwater_vm(mm);
2306         mas_for_each(mas, vma, ULONG_MAX) {
2307                 long nrpages = vma_pages(vma);
2308
2309                 if (vma->vm_flags & VM_ACCOUNT)
2310                         nr_accounted += nrpages;
2311                 vm_stat_account(mm, vma->vm_flags, -nrpages);
2312                 remove_vma(vma, false);
2313         }
2314         vm_unacct_memory(nr_accounted);
2315 }
2316
2317 /*
2318  * Get rid of page table information in the indicated region.
2319  *
2320  * Called with the mm semaphore held.
2321  */
2322 static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
2323                 struct vm_area_struct *vma, struct vm_area_struct *prev,
2324                 struct vm_area_struct *next, unsigned long start,
2325                 unsigned long end, unsigned long tree_end, bool mm_wr_locked)
2326 {
2327         struct mmu_gather tlb;
2328         unsigned long mt_start = mas->index;
2329
2330         lru_add_drain();
2331         tlb_gather_mmu(&tlb, mm);
2332         update_hiwater_rss(mm);
2333         unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2334         mas_set(mas, mt_start);
2335         free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2336                                  next ? next->vm_start : USER_PGTABLES_CEILING,
2337                                  mm_wr_locked);
2338         tlb_finish_mmu(&tlb);
2339 }
2340
2341 /*
2342  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
2343  * has already been checked or doesn't make sense to fail.
2344  * VMA Iterator will point to the end VMA.
2345  */
2346 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2347                 unsigned long addr, int new_below)
2348 {
2349         struct vma_prepare vp;
2350         struct vm_area_struct *new;
2351         int err;
2352
2353         WARN_ON(vma->vm_start >= addr);
2354         WARN_ON(vma->vm_end <= addr);
2355
2356         if (vma->vm_ops && vma->vm_ops->may_split) {
2357                 err = vma->vm_ops->may_split(vma, addr);
2358                 if (err)
2359                         return err;
2360         }
2361
2362         new = vm_area_dup(vma);
2363         if (!new)
2364                 return -ENOMEM;
2365
2366         if (new_below) {
2367                 new->vm_end = addr;
2368         } else {
2369                 new->vm_start = addr;
2370                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2371         }
2372
2373         err = -ENOMEM;
2374         vma_iter_config(vmi, new->vm_start, new->vm_end);
2375         if (vma_iter_prealloc(vmi, new))
2376                 goto out_free_vma;
2377
2378         err = vma_dup_policy(vma, new);
2379         if (err)
2380                 goto out_free_vmi;
2381
2382         err = anon_vma_clone(new, vma);
2383         if (err)
2384                 goto out_free_mpol;
2385
2386         if (new->vm_file)
2387                 get_file(new->vm_file);
2388
2389         if (new->vm_ops && new->vm_ops->open)
2390                 new->vm_ops->open(new);
2391
2392         vma_start_write(vma);
2393         vma_start_write(new);
2394
2395         init_vma_prep(&vp, vma);
2396         vp.insert = new;
2397         vma_prepare(&vp);
2398         vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2399
2400         if (new_below) {
2401                 vma->vm_start = addr;
2402                 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2403         } else {
2404                 vma->vm_end = addr;
2405         }
2406
2407         /* vma_complete stores the new vma */
2408         vma_complete(&vp, vmi, vma->vm_mm);
2409
2410         /* Success. */
2411         if (new_below)
2412                 vma_next(vmi);
2413         return 0;
2414
2415 out_free_mpol:
2416         mpol_put(vma_policy(new));
2417 out_free_vmi:
2418         vma_iter_free(vmi);
2419 out_free_vma:
2420         vm_area_free(new);
2421         return err;
2422 }
2423
2424 /*
2425  * Split a vma into two pieces at address 'addr', a new vma is allocated
2426  * either for the first part or the tail.
2427  */
2428 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2429               unsigned long addr, int new_below)
2430 {
2431         if (vma->vm_mm->map_count >= sysctl_max_map_count)
2432                 return -ENOMEM;
2433
2434         return __split_vma(vmi, vma, addr, new_below);
2435 }
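
/*
 * Illustrative userspace view of splitting (hedged, not part of this
 * file): unmapping the middle of a mapping forces two __split_vma()
 * calls in the munmap path below, leaving two VMAs where there was one,
 * which can be observed in /proc/self/maps.
 */
#include <sys/mman.h>
#include <unistd.h>

int example_split_demo(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return -1;
        /* One 3-page VMA; punching out the middle page leaves two VMAs. */
        return munmap(p + page, page);
}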
2436
2437 /*
2438  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2439  * @vmi: The vma iterator
2440  * @vma: The starting vm_area_struct
2441  * @mm: The mm_struct
2442  * @start: The aligned start address to munmap.
2443  * @end: The aligned end address to munmap.
2444  * @uf: The userfaultfd list_head
2445  * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
2446  * success.
2447  *
2448  * Return: 0 on success and drops the lock if so directed, error and leaves the
2449  * lock held otherwise.
2450  */
2451 static int
2452 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2453                     struct mm_struct *mm, unsigned long start,
2454                     unsigned long end, struct list_head *uf, bool unlock)
2455 {
2456         struct vm_area_struct *prev, *next = NULL;
2457         struct maple_tree mt_detach;
2458         int count = 0;
2459         int error = -ENOMEM;
2460         unsigned long locked_vm = 0;
2461         MA_STATE(mas_detach, &mt_detach, 0, 0);
2462         mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2463         mt_on_stack(mt_detach);
2464
2465         /*
2466          * If we need to split any vma, do it now to save pain later.
2467          *
2468          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2469          * unmapped vm_area_struct will remain in use: so lower split_vma
2470          * places tmp vma above, and higher split_vma places tmp vma below.
2471          */
2472
2473         /* Does it split the first one? */
2474         if (start > vma->vm_start) {
2475
2476                 /*
2477                  * Make sure that map_count on return from munmap() will
2478                  * not exceed its limit; but let map_count go just above
2479                  * its limit temporarily, to help free resources as expected.
2480                  */
2481                 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2482                         goto map_count_exceeded;
2483
2484                 error = __split_vma(vmi, vma, start, 1);
2485                 if (error)
2486                         goto start_split_failed;
2487         }
2488
2489         /*
2490          * Detach a range of VMAs from the mm. Using next as a temp variable as
2491          * it is always overwritten.
2492          */
2493         next = vma;
2494         do {
2495                 /* Does it split the end? */
2496                 if (next->vm_end > end) {
2497                         error = __split_vma(vmi, next, end, 0);
2498                         if (error)
2499                                 goto end_split_failed;
2500                 }
2501                 vma_start_write(next);
2502                 mas_set(&mas_detach, count);
2503                 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
2504                 if (error)
2505                         goto munmap_gather_failed;
2506                 vma_mark_detached(next, true);
2507                 if (next->vm_flags & VM_LOCKED)
2508                         locked_vm += vma_pages(next);
2509
2510                 count++;
2511                 if (unlikely(uf)) {
2512                         /*
2513                          * If userfaultfd_unmap_prep returns an error the vmas
2514                          * will remain split, but userland will get a
2515                          * highly unexpected error anyway. This is no
2516                          * different than the case where the first of the two
2517                          * __split_vma fails, but we don't undo the first
2518                          * split, even though we could.  This failure is unlikely
2519                          * enough that it's not worth optimizing for.
2520                          */
2521                         error = userfaultfd_unmap_prep(next, start, end, uf);
2522
2523                         if (error)
2524                                 goto userfaultfd_error;
2525                 }
2526 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2527                 BUG_ON(next->vm_start < start);
2528                 BUG_ON(next->vm_start > end);
2529 #endif
2530         } for_each_vma_range(*vmi, next, end);
2531
2532 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2533         /* Make sure no VMAs are about to be lost. */
2534         {
2535                 MA_STATE(test, &mt_detach, 0, 0);
2536                 struct vm_area_struct *vma_mas, *vma_test;
2537                 int test_count = 0;
2538
2539                 vma_iter_set(vmi, start);
2540                 rcu_read_lock();
2541                 vma_test = mas_find(&test, count - 1);
2542                 for_each_vma_range(*vmi, vma_mas, end) {
2543                         BUG_ON(vma_mas != vma_test);
2544                         test_count++;
2545                         vma_test = mas_next(&test, count - 1);
2546                 }
2547                 rcu_read_unlock();
2548                 BUG_ON(count != test_count);
2549         }
2550 #endif
2551
2552         while (vma_iter_addr(vmi) > start)
2553                 vma_iter_prev_range(vmi);
2554
2555         error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2556         if (error)
2557                 goto clear_tree_failed;
2558
2559         /* Point of no return */
2560         mm->locked_vm -= locked_vm;
2561         mm->map_count -= count;
2562         if (unlock)
2563                 mmap_write_downgrade(mm);
2564
2565         prev = vma_iter_prev_range(vmi);
2566         next = vma_next(vmi);
2567         if (next)
2568                 vma_iter_prev_range(vmi);
2569
2570         /*
2571          * We can free page tables without write-locking mmap_lock because VMAs
2572          * were isolated before we downgraded mmap_lock.
2573          */
2574         mas_set(&mas_detach, 1);
2575         unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2576                      !unlock);
2577         /* Statistics and freeing VMAs */
2578         mas_set(&mas_detach, 0);
2579         remove_mt(mm, &mas_detach);
2580         validate_mm(mm);
2581         if (unlock)
2582                 mmap_read_unlock(mm);
2583
2584         __mt_destroy(&mt_detach);
2585         return 0;
2586
2587 clear_tree_failed:
2588 userfaultfd_error:
2589 munmap_gather_failed:
2590 end_split_failed:
2591         mas_set(&mas_detach, 0);
2592         mas_for_each(&mas_detach, next, end)
2593                 vma_mark_detached(next, false);
2594
2595         __mt_destroy(&mt_detach);
2596 start_split_failed:
2597 map_count_exceeded:
2598         validate_mm(mm);
2599         return error;
2600 }
2601
2602 /*
2603  * do_vmi_munmap() - munmap a given range.
2604  * @vmi: The vma iterator
2605  * @mm: The mm_struct
2606  * @start: The start address to munmap
2607  * @len: The length of the range to munmap
2608  * @uf: The userfaultfd list_head
2609  * @unlock: set to true if the user wants to drop the mmap_lock on success
2610  *
2611  * This function takes a @vmi that is either pointing to the previous VMA or set
2612  * to MA_START and sets it up to remove the mapping(s).  The @len will be
2613  * aligned and any arch_unmap work will be performed.
2614  *
2615  * Return: 0 on success and drops the lock if so directed, error and leaves the
2616  * lock held otherwise.
2617  */
2618 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
2619                   unsigned long start, size_t len, struct list_head *uf,
2620                   bool unlock)
2621 {
2622         unsigned long end;
2623         struct vm_area_struct *vma;
2624
2625         if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2626                 return -EINVAL;
2627
2628         end = start + PAGE_ALIGN(len);
2629         if (end == start)
2630                 return -EINVAL;
2631
2632          /* arch_unmap() might do unmaps itself.  */
2633         arch_unmap(mm, start, end);
2634
2635         /* Find the first overlapping VMA */
2636         vma = vma_find(vmi, end);
2637         if (!vma) {
2638                 if (unlock)
2639                         mmap_write_unlock(mm);
2640                 return 0;
2641         }
2642
2643         return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2644 }
2645
2646 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2647  * @mm: The mm_struct
2648  * @start: The start address to munmap
2649  * @len: The length to be munmapped.
2650  * @uf: The userfaultfd list_head
2651  *
2652  * Return: 0 on success, error otherwise.
2653  */
2654 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2655               struct list_head *uf)
2656 {
2657         VMA_ITERATOR(vmi, mm, start);
2658
2659         return do_vmi_munmap(&vmi, mm, start, len, uf, false);
2660 }
2661
2662 unsigned long mmap_region(struct file *file, unsigned long addr,
2663                 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2664                 struct list_head *uf)
2665 {
2666         struct mm_struct *mm = current->mm;
2667         struct vm_area_struct *vma = NULL;
2668         struct vm_area_struct *next, *prev, *merge;
2669         pgoff_t pglen = len >> PAGE_SHIFT;
2670         unsigned long charged = 0;
2671         unsigned long end = addr + len;
2672         unsigned long merge_start = addr, merge_end = end;
2673         pgoff_t vm_pgoff;
2674         int error;
2675         VMA_ITERATOR(vmi, mm, addr);
2676
2677         /* Check against address space limit. */
2678         if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
2679                 unsigned long nr_pages;
2680
2681                 /*
2682                  * MAP_FIXED may remove pages of mappings that intersects with
2683                  * requested mapping. Account for the pages it would unmap.
2684                  */
2685                 nr_pages = count_vma_pages_range(mm, addr, end);
2686
2687                 if (!may_expand_vm(mm, vm_flags,
2688                                         (len >> PAGE_SHIFT) - nr_pages))
2689                         return -ENOMEM;
2690         }
2691
2692         /* Unmap any existing mapping in the area */
2693         if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
2694                 return -ENOMEM;
2695
2696         /*
2697          * Private writable mapping: check memory availability
2698          */
2699         if (accountable_mapping(file, vm_flags)) {
2700                 charged = len >> PAGE_SHIFT;
2701                 if (security_vm_enough_memory_mm(mm, charged))
2702                         return -ENOMEM;
2703                 vm_flags |= VM_ACCOUNT;
2704         }
2705
2706         next = vma_next(&vmi);
2707         prev = vma_prev(&vmi);
2708         if (vm_flags & VM_SPECIAL) {
2709                 if (prev)
2710                         vma_iter_next_range(&vmi);
2711                 goto cannot_expand;
2712         }
2713
2714         /* Attempt to expand an old mapping */
2715         /* Check next */
2716         if (next && next->vm_start == end && !vma_policy(next) &&
2717             can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
2718                                  NULL_VM_UFFD_CTX, NULL)) {
2719                 merge_end = next->vm_end;
2720                 vma = next;
2721                 vm_pgoff = next->vm_pgoff - pglen;
2722         }
2723
2724         /* Check prev */
2725         if (prev && prev->vm_end == addr && !vma_policy(prev) &&
2726             (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2727                                        pgoff, vma->vm_userfaultfd_ctx, NULL) :
2728                    can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
2729                                        NULL_VM_UFFD_CTX, NULL))) {
2730                 merge_start = prev->vm_start;
2731                 vma = prev;
2732                 vm_pgoff = prev->vm_pgoff;
2733         } else if (prev) {
2734                 vma_iter_next_range(&vmi);
2735         }
2736
2737         /* Actually expand, if possible */
2738         if (vma &&
2739             !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2740                 khugepaged_enter_vma(vma, vm_flags);
2741                 goto expanded;
2742         }
2743
2744         if (vma == prev)
2745                 vma_iter_set(&vmi, addr);
2746 cannot_expand:
2747
2748         /*
2749          * Determine the object being mapped and call the appropriate
2750          * specific mapper.  The address has already been validated and any
2751          * overlapping mappings in the range have been removed above.
2752          */
2753         vma = vm_area_alloc(mm);
2754         if (!vma) {
2755                 error = -ENOMEM;
2756                 goto unacct_error;
2757         }
2758
2759         vma_iter_config(&vmi, addr, end);
2760         vma->vm_start = addr;
2761         vma->vm_end = end;
2762         vm_flags_init(vma, vm_flags);
2763         vma->vm_page_prot = vm_get_page_prot(vm_flags);
2764         vma->vm_pgoff = pgoff;
2765
2766         if (file) {
2767                 if (vm_flags & VM_SHARED) {
2768                         error = mapping_map_writable(file->f_mapping);
2769                         if (error)
2770                                 goto free_vma;
2771                 }
2772
2773                 vma->vm_file = get_file(file);
2774                 error = call_mmap(file, vma);
2775                 if (error)
2776                         goto unmap_and_free_vma;
2777
2778                 /*
2779                  * Expansion is handled above, merging is handled below.
2780                  * Drivers should not alter the address of the VMA.
2781                  */
2782                 error = -EINVAL;
2783                 if (WARN_ON((addr != vma->vm_start)))
2784                         goto close_and_free_vma;
2785
2786                 vma_iter_config(&vmi, addr, end);
2787                 /*
2788                  * If vm_flags changed after call_mmap(), we should try merge
2789                  * vma again as we may succeed this time.
2790                  */
2791                 if (unlikely(vm_flags != vma->vm_flags && prev)) {
2792                         merge = vma_merge(&vmi, mm, prev, vma->vm_start,
2793                                     vma->vm_end, vma->vm_flags, NULL,
2794                                     vma->vm_file, vma->vm_pgoff, NULL,
2795                                     NULL_VM_UFFD_CTX, NULL);
2796                         if (merge) {
2797                                 /*
2798                                  * ->mmap() can change vma->vm_file and fput
2799                                  * the original file. So fput the vma->vm_file
2800                                  * here or we would add an extra fput for file
2801                                  * and cause general protection fault
2802                                  * ultimately.
2803                                  */
2804                                 fput(vma->vm_file);
2805                                 vm_area_free(vma);
2806                                 vma = merge;
2807                                 /* Update vm_flags to pick up the change. */
2808                                 vm_flags = vma->vm_flags;
2809                                 goto unmap_writable;
2810                         }
2811                 }
2812
2813                 vm_flags = vma->vm_flags;
2814         } else if (vm_flags & VM_SHARED) {
2815                 error = shmem_zero_setup(vma);
2816                 if (error)
2817                         goto free_vma;
2818         } else {
2819                 vma_set_anonymous(vma);
2820         }
2821
2822         if (map_deny_write_exec(vma, vma->vm_flags)) {
2823                 error = -EACCES;
2824                 goto close_and_free_vma;
2825         }
2826
2827         /* Allow architectures to sanity-check the vm_flags */
2828         error = -EINVAL;
2829         if (!arch_validate_flags(vma->vm_flags))
2830                 goto close_and_free_vma;
2831
2832         error = -ENOMEM;
2833         if (vma_iter_prealloc(&vmi, vma))
2834                 goto close_and_free_vma;
2835
2836         /* Lock the VMA since it is modified after insertion into VMA tree */
2837         vma_start_write(vma);
2838         vma_iter_store(&vmi, vma);
2839         mm->map_count++;
2840         if (vma->vm_file) {
2841                 i_mmap_lock_write(vma->vm_file->f_mapping);
2842                 if (vma->vm_flags & VM_SHARED)
2843                         mapping_allow_writable(vma->vm_file->f_mapping);
2844
2845                 flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2846                 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2847                 flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2848                 i_mmap_unlock_write(vma->vm_file->f_mapping);
2849         }
2850
2851         /*
2852          * vma_merge() calls khugepaged_enter_vma() as well; the call
2853          * below covers the non-merge case.
2854          */
2855         khugepaged_enter_vma(vma, vma->vm_flags);
2856
2857         /* Once vma denies write, undo our temporary denial count */
2858 unmap_writable:
2859         if (file && vm_flags & VM_SHARED)
2860                 mapping_unmap_writable(file->f_mapping);
2861         file = vma->vm_file;
2862         ksm_add_vma(vma);
2863 expanded:
2864         perf_event_mmap(vma);
2865
2866         vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
2867         if (vm_flags & VM_LOCKED) {
2868                 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2869                                         is_vm_hugetlb_page(vma) ||
2870                                         vma == get_gate_vma(current->mm))
2871                         vm_flags_clear(vma, VM_LOCKED_MASK);
2872                 else
2873                         mm->locked_vm += (len >> PAGE_SHIFT);
2874         }
2875
2876         if (file)
2877                 uprobe_mmap(vma);
2878
2879         /*
2880          * A new (or expanded) vma always gets soft-dirty status.
2881          * Otherwise the user-space soft-dirty page tracker could
2882          * not distinguish a region that was unmapped and then
2883          * mapped anew in place (which must be treated as a
2884          * completely new data area).
2885          */
2886         vm_flags_set(vma, VM_SOFTDIRTY);
2887
2888         vma_set_page_prot(vma);
2889
2890         validate_mm(mm);
2891         return addr;
2892
2893 close_and_free_vma:
2894         if (file && vma->vm_ops && vma->vm_ops->close)
2895                 vma->vm_ops->close(vma);
2896
2897         if (file || vma->vm_file) {
2898 unmap_and_free_vma:
2899                 fput(vma->vm_file);
2900                 vma->vm_file = NULL;
2901
2902                 vma_iter_set(&vmi, vma->vm_end);
2903                 /* Undo any partial mapping done by a device driver. */
2904                 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
2905                              vma->vm_end, vma->vm_end, true);
2906         }
2907         if (file && (vm_flags & VM_SHARED))
2908                 mapping_unmap_writable(file->f_mapping);
2909 free_vma:
2910         vm_area_free(vma);
2911 unacct_error:
2912         if (charged)
2913                 vm_unacct_memory(charged);
2914         validate_mm(mm);
2915         return error;
2916 }
2917
2918 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
2919 {
2920         int ret;
2921         struct mm_struct *mm = current->mm;
2922         LIST_HEAD(uf);
2923         VMA_ITERATOR(vmi, mm, start);
2924
2925         if (mmap_write_lock_killable(mm))
2926                 return -EINTR;
2927
2928         ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2929         if (ret || !unlock)
2930                 mmap_write_unlock(mm);
2931
2932         userfaultfd_unmap_complete(mm, &uf);
2933         return ret;
2934 }
2935
2936 int vm_munmap(unsigned long start, size_t len)
2937 {
2938         return __vm_munmap(start, len, false);
2939 }
2940 EXPORT_SYMBOL(vm_munmap);
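
/*
 * Editor's example (illustrative sketch, not part of mmap.c): a driver that
 * set up a mapping on behalf of userspace can tear a range down again with
 * vm_munmap().  The helper takes and drops mmap_lock itself, so it must be
 * called without the lock held:
 *
 *	if (vm_munmap(user_addr, user_len))
 *		pr_warn("failed to unmap user range\n");
 *
 * user_addr and user_len are hypothetical names.
 */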
2941
2942 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2943 {
2944         addr = untagged_addr(addr);
2945         return __vm_munmap(addr, len, true);
2946 }
2947
2948
2949 /*
2950  * Emulation of deprecated remap_file_pages() syscall.
2951  */
2952 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2953                 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2954 {
2956         struct mm_struct *mm = current->mm;
2957         struct vm_area_struct *vma;
2958         unsigned long populate = 0;
2959         unsigned long ret = -EINVAL;
2960         struct file *file;
2961
2962         pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
2963                      current->comm, current->pid);
2964
2965         if (prot)
2966                 return ret;
2967         start = start & PAGE_MASK;
2968         size = size & PAGE_MASK;
2969
2970         if (start + size <= start)
2971                 return ret;
2972
2973         /* Does pgoff wrap? */
2974         if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2975                 return ret;
2976
2977         if (mmap_write_lock_killable(mm))
2978                 return -EINTR;
2979
2980         vma = vma_lookup(mm, start);
2981
2982         if (!vma || !(vma->vm_flags & VM_SHARED))
2983                 goto out;
2984
2985         if (start + size > vma->vm_end) {
2986                 VMA_ITERATOR(vmi, mm, vma->vm_end);
2987                 struct vm_area_struct *next, *prev = vma;
2988
2989                 for_each_vma_range(vmi, next, start + size) {
2990                         /* hole between vmas ? */
2991                         if (next->vm_start != prev->vm_end)
2992                                 goto out;
2993
2994                         if (next->vm_file != vma->vm_file)
2995                                 goto out;
2996
2997                         if (next->vm_flags != vma->vm_flags)
2998                                 goto out;
2999
3000                         if (start + size <= next->vm_end)
3001                                 break;
3002
3003                         prev = next;
3004                 }
3005
3006                 if (!next)
3007                         goto out;
3008         }
3009
3010         prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3011         prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3012         prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
3013
3014         flags &= MAP_NONBLOCK;
3015         flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
3016         if (vma->vm_flags & VM_LOCKED)
3017                 flags |= MAP_LOCKED;
3018
3019         file = get_file(vma->vm_file);
3020         ret = do_mmap(vma->vm_file, start, size,
3021                         prot, flags, 0, pgoff, &populate, NULL);
3022         fput(file);
3023 out:
3024         mmap_write_unlock(mm);
3025         if (populate)
3026                 mm_populate(ret, populate);
3027         if (!IS_ERR_VALUE(ret))
3028                 ret = 0;
3029         return ret;
3030 }
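
/*
 * Editor's example (illustrative sketch): what the emulation above amounts
 * to.  A legacy nonlinear request such as
 *
 *	remap_file_pages(start, size, 0, pgoff, 0);
 *
 * is replayed as an ordinary linear mapping of the same file,
 *
 *	mmap(start, size, prot_of_vma,
 *	     MAP_SHARED | MAP_FIXED | MAP_POPULATE, fd_of_vma,
 *	     pgoff << PAGE_SHIFT);
 *
 * plus MAP_LOCKED if the original VMA was mlocked.  prot_of_vma and
 * fd_of_vma are hypothetical placeholders for the protection bits and file
 * recovered from the existing VMA above.
 */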
3031
3032 /*
3033  * do_vma_munmap() - Unmap a full or partial vma.
3034  * @vmi: The vma iterator pointing at the vma
3035  * @vma: The first vma to be munmapped
3036  * @start: The start address of the range to unmap
3037  * @end: The end address of the range to unmap
3038  * @uf: The userfaultfd list_head
3039  * @unlock: Drop the lock on success
3040  *
3041  * Unmaps a VMA mapping when the vma iterator is already in position.
3042  * Does not handle alignment.
3043  *
3044  * Return: 0 on success and drops the lock if so directed, error on failure
3045  * and still holds the lock.
3046  */
3047 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3048                 unsigned long start, unsigned long end, struct list_head *uf,
3049                 bool unlock)
3050 {
3051         struct mm_struct *mm = vma->vm_mm;
3052
3053         arch_unmap(mm, start, end);
3054         return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3055 }
3056
3057 /*
3058  * do_brk_flags() - Increase the brk vma if the flags match.
3059  * @vmi: The vma iterator
3060  * @addr: The start address
3061  * @len: The length of the increase
3062  * @vma: The vma to try to expand, or NULL
3063  * @flags: The VMA flags
3064  *
3065  * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
3066  * do not match then create a new anonymous VMA.  Eventually we may be able to
3067  * do some brk-specific accounting here.
3068  */
3069 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3070                 unsigned long addr, unsigned long len, unsigned long flags)
3071 {
3072         struct mm_struct *mm = current->mm;
3073         struct vma_prepare vp;
3074
3075         /*
3076          * Check against address space limits using the changed size.
3077          * Note: This happens *after* clearing old mappings in some code paths.
3078          */
3079         flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3080         if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3081                 return -ENOMEM;
3082
3083         if (mm->map_count > sysctl_max_map_count)
3084                 return -ENOMEM;
3085
3086         if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3087                 return -ENOMEM;
3088
3089         /*
3090          * Expand the existing vma if possible; note that singular lists do
3091          * not occur after forking, so expansion will only happen on new VMAs.
3092          */
3093         if (vma && vma->vm_end == addr && !vma_policy(vma) &&
3094             can_vma_merge_after(vma, flags, NULL, NULL,
3095                                 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
3096                 vma_iter_config(vmi, vma->vm_start, addr + len);
3097                 if (vma_iter_prealloc(vmi, vma))
3098                         goto unacct_fail;
3099
3100                 vma_start_write(vma);
3101
3102                 init_vma_prep(&vp, vma);
3103                 vma_prepare(&vp);
3104                 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
3105                 vma->vm_end = addr + len;
3106                 vm_flags_set(vma, VM_SOFTDIRTY);
3107                 vma_iter_store(vmi, vma);
3108
3109                 vma_complete(&vp, vmi, mm);
3110                 khugepaged_enter_vma(vma, flags);
3111                 goto out;
3112         }
3113
3114         if (vma)
3115                 vma_iter_next_range(vmi);
3116         /* create a vma struct for an anonymous mapping */
3117         vma = vm_area_alloc(mm);
3118         if (!vma)
3119                 goto unacct_fail;
3120
3121         vma_set_anonymous(vma);
3122         vma->vm_start = addr;
3123         vma->vm_end = addr + len;
3124         vma->vm_pgoff = addr >> PAGE_SHIFT;
3125         vm_flags_init(vma, flags);
3126         vma->vm_page_prot = vm_get_page_prot(flags);
3127         vma_start_write(vma);
3128         if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
3129                 goto mas_store_fail;
3130
3131         mm->map_count++;
3132         validate_mm(mm);
3133         ksm_add_vma(vma);
3134 out:
3135         perf_event_mmap(vma);
3136         mm->total_vm += len >> PAGE_SHIFT;
3137         mm->data_vm += len >> PAGE_SHIFT;
3138         if (flags & VM_LOCKED)
3139                 mm->locked_vm += (len >> PAGE_SHIFT);
3140         vm_flags_set(vma, VM_SOFTDIRTY);
3141         return 0;
3142
3143 mas_store_fail:
3144         vm_area_free(vma);
3145 unacct_fail:
3146         vm_unacct_memory(len >> PAGE_SHIFT);
3147         return -ENOMEM;
3148 }
3149
3150 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3151 {
3152         struct mm_struct *mm = current->mm;
3153         struct vm_area_struct *vma = NULL;
3154         unsigned long len;
3155         int ret;
3156         bool populate;
3157         LIST_HEAD(uf);
3158         VMA_ITERATOR(vmi, mm, addr);
3159
3160         len = PAGE_ALIGN(request);
3161         if (len < request)
3162                 return -ENOMEM;
3163         if (!len)
3164                 return 0;
3165
3166         /* Until we need other flags, refuse anything except VM_EXEC. */
3167         if ((flags & (~VM_EXEC)) != 0)
3168                 return -EINVAL;
3169
3170         if (mmap_write_lock_killable(mm))
3171                 return -EINTR;
3172
3173         ret = check_brk_limits(addr, len);
3174         if (ret)
3175                 goto limits_failed;
3176
3177         ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
3178         if (ret)
3179                 goto munmap_failed;
3180
3181         vma = vma_prev(&vmi);
3182         ret = do_brk_flags(&vmi, vma, addr, len, flags);
3183         populate = ((mm->def_flags & VM_LOCKED) != 0);
3184         mmap_write_unlock(mm);
3185         userfaultfd_unmap_complete(mm, &uf);
3186         if (populate && !ret)
3187                 mm_populate(addr, len);
3188         return ret;
3189
3190 munmap_failed:
3191 limits_failed:
3192         mmap_write_unlock(mm);
3193         return ret;
3194 }
3195 EXPORT_SYMBOL(vm_brk_flags);
3196
3197 int vm_brk(unsigned long addr, unsigned long len)
3198 {
3199         return vm_brk_flags(addr, len, 0);
3200 }
3201 EXPORT_SYMBOL(vm_brk);
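
/*
 * Editor's example (illustrative sketch): callers such as binary loaders
 * use these helpers to grow an anonymous, brk-style region, e.g. to
 * zero-fill a bss-like area:
 *
 *	error = vm_brk_flags(bss_start, bss_size, 0);
 *	if (error)
 *		return error;
 *
 * bss_start and bss_size are hypothetical names.  vm_brk_flags()
 * page-aligns the length itself and, like vm_munmap(), takes mmap_lock
 * internally.
 */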
3202
3203 /* Release all mmaps. */
3204 void exit_mmap(struct mm_struct *mm)
3205 {
3206         struct mmu_gather tlb;
3207         struct vm_area_struct *vma;
3208         unsigned long nr_accounted = 0;
3209         MA_STATE(mas, &mm->mm_mt, 0, 0);
3210         int count = 0;
3211
3212         /* mm's last user has gone, and it's about to be pulled down */
3213         mmu_notifier_release(mm);
3214
3215         mmap_read_lock(mm);
3216         arch_exit_mmap(mm);
3217
3218         vma = mas_find(&mas, ULONG_MAX);
3219         if (!vma) {
3220                 /* Can happen if dup_mmap() hit an OOM and bailed out */
3221                 mmap_read_unlock(mm);
3222                 return;
3223         }
3224
3225         lru_add_drain();
3226         flush_cache_mm(mm);
3227         tlb_gather_mmu_fullmm(&tlb, mm);
3228         /* update_hiwater_rss(mm) here? but nobody should be looking */
3229         /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
3230         unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
3231         mmap_read_unlock(mm);
3232
3233         /*
3234          * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
3235          * because the memory has been already freed.
3236          */
3237         set_bit(MMF_OOM_SKIP, &mm->flags);
3238         mmap_write_lock(mm);
3239         mt_clear_in_rcu(&mm->mm_mt);
3240         mas_set(&mas, vma->vm_end);
3241         free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
3242                       USER_PGTABLES_CEILING, true);
3243         tlb_finish_mmu(&tlb);
3244
3245         /*
3246          * Walk the list again, actually closing and freeing it, with preemption
3247          * enabled, without holding any MM locks besides the unreachable
3248          * mmap_write_lock.
3249          */
3250         mas_set(&mas, vma->vm_end);
3251         do {
3252                 if (vma->vm_flags & VM_ACCOUNT)
3253                         nr_accounted += vma_pages(vma);
3254                 remove_vma(vma, true);
3255                 count++;
3256                 cond_resched();
3257         } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
3258
3259         BUG_ON(count != mm->map_count);
3260
3261         trace_exit_mmap(mm);
3262         __mt_destroy(&mm->mm_mt);
3263         mmap_write_unlock(mm);
3264         vm_unacct_memory(nr_accounted);
3265 }
3266
3267 /* Insert vm structure into process list sorted by address
3268  * and into the inode's i_mmap tree.  If vm_file is non-NULL
3269  * then i_mmap_rwsem is taken here.
3270  */
3271 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3272 {
3273         unsigned long charged = vma_pages(vma);
3274
3276         if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3277                 return -ENOMEM;
3278
3279         if ((vma->vm_flags & VM_ACCOUNT) &&
3280              security_vm_enough_memory_mm(mm, charged))
3281                 return -ENOMEM;
3282
3283         /*
3284          * The vm_pgoff of a purely anonymous vma should be irrelevant
3285          * until its first write fault, when page's anon_vma and index
3286          * are set.  But now set the vm_pgoff it will almost certainly
3287          * end up with (unless mremap moves it elsewhere before that
3288          * first write fault), so /proc/pid/maps tells a consistent story.
3289          *
3290          * By setting it to reflect the virtual start address of the
3291          * vma, merges and splits can happen in a seamless way, just
3292          * using the existing file pgoff checks and manipulations.
3293          * Similarly in do_mmap and in do_brk_flags.
3294          */
3295         if (vma_is_anonymous(vma)) {
3296                 BUG_ON(vma->anon_vma);
3297                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3298         }
3299
3300         if (vma_link(mm, vma)) {
3301                 if (vma->vm_flags & VM_ACCOUNT)
3302                         vm_unacct_memory(charged);
3303                 return -ENOMEM;
3304         }
3304
3305         return 0;
3306 }
3307
3308 /*
3309  * Copy the vma structure to a new location in the same mm,
3310  * prior to moving page table entries, to effect an mremap move.
3311  */
3312 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3313         unsigned long addr, unsigned long len, pgoff_t pgoff,
3314         bool *need_rmap_locks)
3315 {
3316         struct vm_area_struct *vma = *vmap;
3317         unsigned long vma_start = vma->vm_start;
3318         struct mm_struct *mm = vma->vm_mm;
3319         struct vm_area_struct *new_vma, *prev;
3320         bool faulted_in_anon_vma = true;
3321         VMA_ITERATOR(vmi, mm, addr);
3322
3323         /*
3324          * If anonymous vma has not yet been faulted, update new pgoff
3325          * to match new location, to increase its chance of merging.
3326          */
3327         if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3328                 pgoff = addr >> PAGE_SHIFT;
3329                 faulted_in_anon_vma = false;
3330         }
3331
3332         new_vma = find_vma_prev(mm, addr, &prev);
3333         if (new_vma && new_vma->vm_start < addr + len)
3334                 return NULL;    /* should never get here */
3335
3336         new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags,
3337                             vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3338                             vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3339         if (new_vma) {
3340                 /*
3341                  * Source vma may have been merged into new_vma
3342                  */
3343                 if (unlikely(vma_start >= new_vma->vm_start &&
3344                              vma_start < new_vma->vm_end)) {
3345                         /*
3346                          * The only way we can get a vma_merge with
3347                          * self during an mremap is if the vma hasn't
3348                          * been faulted in yet and we were allowed to
3349                          * reset the dst vma->vm_pgoff to the
3350                          * destination address of the mremap to allow
3351                          * the merge to happen. mremap must change the
3352                          * vm_pgoff linearity between src and dst vmas
3353                          * (in turn preventing a vma_merge) to be
3354                          * safe. It is only safe to keep the vm_pgoff
3355                          * linear if there are no pages mapped yet.
3356                          */
3357                         VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3358                         *vmap = vma = new_vma;
3359                 }
3360                 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3361         } else {
3362                 new_vma = vm_area_dup(vma);
3363                 if (!new_vma)
3364                         goto out;
3365                 new_vma->vm_start = addr;
3366                 new_vma->vm_end = addr + len;
3367                 new_vma->vm_pgoff = pgoff;
3368                 if (vma_dup_policy(vma, new_vma))
3369                         goto out_free_vma;
3370                 if (anon_vma_clone(new_vma, vma))
3371                         goto out_free_mempol;
3372                 if (new_vma->vm_file)
3373                         get_file(new_vma->vm_file);
3374                 if (new_vma->vm_ops && new_vma->vm_ops->open)
3375                         new_vma->vm_ops->open(new_vma);
3376                 if (vma_link(mm, new_vma))
3377                         goto out_vma_link;
3378                 *need_rmap_locks = false;
3379         }
3380         return new_vma;
3381
3382 out_vma_link:
3383         if (new_vma->vm_ops && new_vma->vm_ops->close)
3384                 new_vma->vm_ops->close(new_vma);
3385
3386         if (new_vma->vm_file)
3387                 fput(new_vma->vm_file);
3388
3389         unlink_anon_vmas(new_vma);
3390 out_free_mempol:
3391         mpol_put(vma_policy(new_vma));
3392 out_free_vma:
3393         vm_area_free(new_vma);
3394 out:
3395         return NULL;
3396 }
3397
3398 /*
3399  * Return true if the calling process may expand its vm space by the passed
3400  * number of pages
3401  */
3402 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3403 {
3404         if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3405                 return false;
3406
3407         if (is_data_mapping(flags) &&
3408             mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3409                 /* Workaround for Valgrind */
3410                 if (rlimit(RLIMIT_DATA) == 0 &&
3411                     mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3412                         return true;
3413
3414                 pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
3415                              current->comm, current->pid,
3416                              (mm->data_vm + npages) << PAGE_SHIFT,
3417                              rlimit(RLIMIT_DATA),
3418                              ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3419
3420                 if (!ignore_rlimit_data)
3421                         return false;
3422         }
3423
3424         return true;
3425 }
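
/*
 * Editor's note (worked example): with 4KB pages, a request to grow by
 * 64KB arrives here as npages == 16, so the RLIMIT_AS test above reads
 *
 *	total_vm + 16 > rlimit(RLIMIT_AS) >> PAGE_SHIFT
 *
 * i.e. both sides are compared in pages, not bytes; the RLIMIT_DATA test
 * for data mappings works the same way.
 */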
3426
3427 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3428 {
3429         WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
3430
3431         if (is_exec_mapping(flags))
3432                 mm->exec_vm += npages;
3433         else if (is_stack_mapping(flags))
3434                 mm->stack_vm += npages;
3435         else if (is_data_mapping(flags))
3436                 mm->data_vm += npages;
3437 }
3438
3439 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3440
3441 /*
3442  * Having a close hook prevents vma merging regardless of flags.
3443  */
3444 static void special_mapping_close(struct vm_area_struct *vma)
3445 {
3446 }
3447
3448 static const char *special_mapping_name(struct vm_area_struct *vma)
3449 {
3450         return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3451 }
3452
3453 static int special_mapping_mremap(struct vm_area_struct *new_vma)
3454 {
3455         struct vm_special_mapping *sm = new_vma->vm_private_data;
3456
3457         if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3458                 return -EFAULT;
3459
3460         if (sm->mremap)
3461                 return sm->mremap(sm, new_vma);
3462
3463         return 0;
3464 }
3465
3466 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3467 {
3468         /*
3469          * Forbid splitting special mappings - the kernel has expectations
3470          * about the number of pages in the mapping. Together with
3471          * VM_DONTEXPAND, this keeps the size of the vma constant over the
3472          * special mapping's lifetime.
3473          */
3474         return -EINVAL;
3475 }
3476
3477 static const struct vm_operations_struct special_mapping_vmops = {
3478         .close = special_mapping_close,
3479         .fault = special_mapping_fault,
3480         .mremap = special_mapping_mremap,
3481         .name = special_mapping_name,
3482         /* vDSO code relies on VVAR not being accessible remotely */
3483         .access = NULL,
3484         .may_split = special_mapping_split,
3485 };
3486
3487 static const struct vm_operations_struct legacy_special_mapping_vmops = {
3488         .close = special_mapping_close,
3489         .fault = special_mapping_fault,
3490 };
3491
3492 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3493 {
3494         struct vm_area_struct *vma = vmf->vma;
3495         pgoff_t pgoff;
3496         struct page **pages;
3497
3498         if (vma->vm_ops == &legacy_special_mapping_vmops) {
3499                 pages = vma->vm_private_data;
3500         } else {
3501                 struct vm_special_mapping *sm = vma->vm_private_data;
3502
3503                 if (sm->fault)
3504                         return sm->fault(sm, vmf->vma, vmf);
3505
3506                 pages = sm->pages;
3507         }
3508
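	/* Advance through the NULL-terminated array to the page at vmf->pgoff. */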
3509         for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3510                 pgoff--;
3511
3512         if (*pages) {
3513                 struct page *page = *pages;
3514                 get_page(page);
3515                 vmf->page = page;
3516                 return 0;
3517         }
3518
3519         return VM_FAULT_SIGBUS;
3520 }
3521
3522 static struct vm_area_struct *__install_special_mapping(
3523         struct mm_struct *mm,
3524         unsigned long addr, unsigned long len,
3525         unsigned long vm_flags, void *priv,
3526         const struct vm_operations_struct *ops)
3527 {
3528         int ret;
3529         struct vm_area_struct *vma;
3530
3531         vma = vm_area_alloc(mm);
3532         if (unlikely(vma == NULL))
3533                 return ERR_PTR(-ENOMEM);
3534
3535         vma->vm_start = addr;
3536         vma->vm_end = addr + len;
3537
3538         vm_flags_init(vma, (vm_flags | mm->def_flags |
3539                       VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
3540         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3541
3542         vma->vm_ops = ops;
3543         vma->vm_private_data = priv;
3544
3545         ret = insert_vm_struct(mm, vma);
3546         if (ret)
3547                 goto out;
3548
3549         vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3550
3551         perf_event_mmap(vma);
3552
3553         return vma;
3554
3555 out:
3556         vm_area_free(vma);
3557         return ERR_PTR(ret);
3558 }
3559
3560 bool vma_is_special_mapping(const struct vm_area_struct *vma,
3561         const struct vm_special_mapping *sm)
3562 {
3563         return vma->vm_private_data == sm &&
3564                 (vma->vm_ops == &special_mapping_vmops ||
3565                  vma->vm_ops == &legacy_special_mapping_vmops);
3566 }
3567
3568 /*
3569  * Called with mm->mmap_lock held for writing.
3570  * Insert a new vma covering the given region, with the given flags.
3571  * Its pages are supplied by the given array of struct page *.
3572  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3573  * The region past the last page supplied will always produce SIGBUS.
3574  * The array pointer and the pages it points to are assumed to stay alive
3575  * for as long as this mapping might exist.
3576  */
3577 struct vm_area_struct *_install_special_mapping(
3578         struct mm_struct *mm,
3579         unsigned long addr, unsigned long len,
3580         unsigned long vm_flags, const struct vm_special_mapping *spec)
3581 {
3582         return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3583                                         &special_mapping_vmops);
3584 }
3585
3586 int install_special_mapping(struct mm_struct *mm,
3587                             unsigned long addr, unsigned long len,
3588                             unsigned long vm_flags, struct page **pages)
3589 {
3590         struct vm_area_struct *vma = __install_special_mapping(
3591                 mm, addr, len, vm_flags, (void *)pages,
3592                 &legacy_special_mapping_vmops);
3593
3594         return PTR_ERR_OR_ZERO(vma);
3595 }
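
/*
 * Editor's example (illustrative sketch, loosely modelled on vDSO setup):
 * a typical caller pins a static descriptor and installs it with mmap_lock
 * held for writing:
 *
 *	static struct page *demo_pages[2];	(demo_pages[1] stays NULL)
 *	static struct vm_special_mapping demo_mapping = {
 *		.name  = "[demo]",
 *		.pages = demo_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &demo_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * All "demo" identifiers are hypothetical.  Because the pages array is
 * NULL-terminated, a fault past the last page raises SIGBUS, as
 * special_mapping_fault() above shows.
 */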
3596
3597 static DEFINE_MUTEX(mm_all_locks_mutex);
3598
3599 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3600 {
3601         if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3602                 /*
3603                  * The LSB of head.next can't change from under us
3604                  * because we hold the mm_all_locks_mutex.
3605                  */
3606                 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3607                 /*
3608                  * We can safely modify head.next after taking the
3609                  * anon_vma->root->rwsem. If some other vma in this mm shares
3610                  * the same anon_vma we won't take it again.
3611                  *
3612                  * No need of atomic instructions here, head.next
3613                  * can't change from under us thanks to the
3614                  * anon_vma->root->rwsem.
3615                  */
3616                 if (__test_and_set_bit(0, (unsigned long *)
3617                                        &anon_vma->root->rb_root.rb_root.rb_node))
3618                         BUG();
3619         }
3620 }
3621
3622 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3623 {
3624         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3625                 /*
3626                  * AS_MM_ALL_LOCKS can't change from under us because
3627                  * we hold the mm_all_locks_mutex.
3628                  *
3629                  * Operations on ->flags have to be atomic because
3630                  * even if AS_MM_ALL_LOCKS is stable thanks to the
3631                  * mm_all_locks_mutex, there may be other cpus
3632                  * changing other bitflags in parallel to us.
3633                  */
3634                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3635                         BUG();
3636                 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
3637         }
3638 }
3639
3640 /*
3641  * This operation locks against the VM for all pte/vma/mm related
3642  * operations that could ever happen on a certain mm. This includes
3643  * vmtruncate, try_to_unmap, and all page faults.
3644  *
3645  * The caller must take the mmap_lock in write mode before calling
3646  * mm_take_all_locks(). The caller isn't allowed to release the
3647  * mmap_lock until mm_drop_all_locks() returns.
3648  *
3649  * mmap_lock in write mode is required in order to block all operations
3650  * that could modify pagetables and free pages without need of
3651  * altering the vma layout. It's also needed in write mode to avoid new
3652  * anon_vmas to be associated with existing vmas.
3653  *
3654  * A single task can't take more than one mm_take_all_locks() in a row
3655  * or it would deadlock.
3656  *
3657  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3658  * mapping->flags avoid taking the same lock twice, if more than one
3659  * vma in this mm is backed by the same anon_vma or address_space.
3660  *
3661  * We take locks in the following order, according to the comment at the
3662  * beginning of mm/rmap.c:
3663  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3664  *     hugetlb mappings);
3665  *   - all vmas marked locked;
3666  *   - all i_mmap_rwsem locks;
3667  *   - all anon_vma->rwsems.
3668  *
3669  * We can take all locks within these types randomly because the VM code
3670  * doesn't nest them and we are protected from parallel
3671  * mm_take_all_locks() by mm_all_locks_mutex.
3672  *
3673  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3674  * that may have to take thousands of locks.
3675  *
3676  * mm_take_all_locks() can fail if it's interrupted by signals.
3677  */
3678 int mm_take_all_locks(struct mm_struct *mm)
3679 {
3680         struct vm_area_struct *vma;
3681         struct anon_vma_chain *avc;
3682         MA_STATE(mas, &mm->mm_mt, 0, 0);
3683
3684         mmap_assert_write_locked(mm);
3685
3686         mutex_lock(&mm_all_locks_mutex);
3687
3688         /*
3689          * vma_start_write() does not have a complement in mm_drop_all_locks()
3690          * because vma_start_write() is always asymmetrical; it marks a VMA as
3691          * being written to until mmap_write_unlock() or mmap_write_downgrade()
3692          * is reached.
3693          */
3694         mas_for_each(&mas, vma, ULONG_MAX) {
3695                 if (signal_pending(current))
3696                         goto out_unlock;
3697                 vma_start_write(vma);
3698         }
3699
3700         mas_set(&mas, 0);
3701         mas_for_each(&mas, vma, ULONG_MAX) {
3702                 if (signal_pending(current))
3703                         goto out_unlock;
3704                 if (vma->vm_file && vma->vm_file->f_mapping &&
3705                                 is_vm_hugetlb_page(vma))
3706                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
3707         }
3708
3709         mas_set(&mas, 0);
3710         mas_for_each(&mas, vma, ULONG_MAX) {
3711                 if (signal_pending(current))
3712                         goto out_unlock;
3713                 if (vma->vm_file && vma->vm_file->f_mapping &&
3714                                 !is_vm_hugetlb_page(vma))
3715                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
3716         }
3717
3718         mas_set(&mas, 0);
3719         mas_for_each(&mas, vma, ULONG_MAX) {
3720                 if (signal_pending(current))
3721                         goto out_unlock;
3722                 if (vma->anon_vma)
3723                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3724                                 vm_lock_anon_vma(mm, avc->anon_vma);
3725         }
3726
3727         return 0;
3728
3729 out_unlock:
3730         mm_drop_all_locks(mm);
3731         return -EINTR;
3732 }
3733
3734 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3735 {
3736         if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3737                 /*
3738                  * The LSB of head.next can't change to 0 from under
3739                  * us because we hold the mm_all_locks_mutex.
3740                  *
3741                  * We must however clear the bitflag before unlocking
3742                  * the vma so the users using the anon_vma->rb_root will
3743                  * never see our bitflag.
3744                  *
3745                  * No need of atomic instructions here, head.next
3746                  * can't change from under us until we release the
3747                  * anon_vma->root->rwsem.
3748                  */
3749                 if (!__test_and_clear_bit(0, (unsigned long *)
3750                                           &anon_vma->root->rb_root.rb_root.rb_node))
3751                         BUG();
3752                 anon_vma_unlock_write(anon_vma);
3753         }
3754 }
3755
3756 static void vm_unlock_mapping(struct address_space *mapping)
3757 {
3758         if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3759                 /*
3760                  * AS_MM_ALL_LOCKS can't change to 0 from under us
3761                  * because we hold the mm_all_locks_mutex.
3762                  */
3763                 i_mmap_unlock_write(mapping);
3764                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3765                                         &mapping->flags))
3766                         BUG();
3767         }
3768 }
3769
3770 /*
3771  * The mmap_lock cannot be released by the caller until
3772  * mm_drop_all_locks() returns.
3773  */
3774 void mm_drop_all_locks(struct mm_struct *mm)
3775 {
3776         struct vm_area_struct *vma;
3777         struct anon_vma_chain *avc;
3778         MA_STATE(mas, &mm->mm_mt, 0, 0);
3779
3780         mmap_assert_write_locked(mm);
3781         BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3782
3783         mas_for_each(&mas, vma, ULONG_MAX) {
3784                 if (vma->anon_vma)
3785                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3786                                 vm_unlock_anon_vma(avc->anon_vma);
3787                 if (vma->vm_file && vma->vm_file->f_mapping)
3788                         vm_unlock_mapping(vma->vm_file->f_mapping);
3789         }
3790
3791         mutex_unlock(&mm_all_locks_mutex);
3792 }
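
/*
 * Editor's example (illustrative sketch): the intended pairing for a caller
 * such as mmu notifier registration:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (!ret) {
 *		(work with every i_mmap_rwsem and anon_vma lock held)
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);
 *
 * On failure (-EINTR) mm_take_all_locks() has already dropped everything it
 * took, so only a successful call is paired with mm_drop_all_locks().
 */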
3793
3794 /*
3795  * initialise the percpu counter for VM
3796  */
3797 void __init mmap_init(void)
3798 {
3799         int ret;
3800
3801         ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3802         VM_BUG_ON(ret);
3803 }
3804
3805 /*
3806  * Initialise sysctl_user_reserve_kbytes.
3807  *
3808  * This is intended to prevent the situation where a user starts a single
3809  * memory-hogging process and then cannot recover (kill the hog) in
3810  * OVERCOMMIT_NEVER mode.
3811  *
3812  * The default value is min(3% of free memory, 128MB).
3813  * 128MB is enough to recover with sshd/login, bash, and top/kill.
3814  */
3815 static int init_user_reserve(void)
3816 {
3817         unsigned long free_kbytes;
3818
3819         free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3820
3821         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3822         return 0;
3823 }
3824 subsys_initcall(init_user_reserve);
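
/*
 * Editor's note (worked example): free_kbytes / 32 is ~3.1% of free memory
 * and 1UL << 17 kilobytes is 128MB, so the computation above implements the
 * documented "min(3% of free memory, 128MB)" default.
 */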
3825
3826 /*
3827  * Initialise sysctl_admin_reserve_kbytes.
3828  *
3829  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3830  * to log in and kill a memory hogging process.
3831  *
3832  * Systems with more than 256MB will reserve 8MB, enough to recover
3833  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3834  * only reserve 3% of free pages by default.
3835  */
3836 static int init_admin_reserve(void)
3837 {
3838         unsigned long free_kbytes;
3839
3840         free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3841
3842         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3843         return 0;
3844 }
3845 subsys_initcall(init_admin_reserve);
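
/*
 * Editor's note (worked example): here 1UL << 13 kilobytes is 8MB, so the
 * admin reserve defaults to min(~3% of free memory, 8MB).
 */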
3846
3847 /*
3848  * Reinitialise user and admin reserves if memory is added or removed.
3849  *
3850  * The default user reserve max is 128MB, and the default max for the
3851  * admin reserve is 8MB. These are usually, but not always, enough to
3852  * enable recovery from a memory hogging process using login/sshd, a shell,
3853  * and tools like top. It may make sense to increase or even disable the
3854  * reserve depending on the existence of swap or variations in the recovery
3855  * tools. So, the admin may have changed them.
3856  *
3857  * If memory is added and the reserves have been eliminated or increased above
3858  * the default max, then we'll trust the admin.
3859  *
3860  * If memory is removed and there isn't enough free memory, then we
3861  * need to reset the reserves.
3862  *
3863  * Otherwise keep the reserve set by the admin.
3864  */
3865 static int reserve_mem_notifier(struct notifier_block *nb,
3866                              unsigned long action, void *data)
3867 {
3868         unsigned long tmp, free_kbytes;
3869
3870         switch (action) {
3871         case MEM_ONLINE:
3872                 /* Default max is 128MB. Leave alone if modified by operator. */
3873                 tmp = sysctl_user_reserve_kbytes;
3874                 if (0 < tmp && tmp < (1UL << 17))
3875                         init_user_reserve();
3876
3877                 /* Default max is 8MB.  Leave alone if modified by operator. */
3878                 tmp = sysctl_admin_reserve_kbytes;
3879                 if (0 < tmp && tmp < (1UL << 13))
3880                         init_admin_reserve();
3881
3882                 break;
3883         case MEM_OFFLINE:
3884                 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3885
3886                 if (sysctl_user_reserve_kbytes > free_kbytes) {
3887                         init_user_reserve();
3888                         pr_info("vm.user_reserve_kbytes reset to %lu\n",
3889                                 sysctl_user_reserve_kbytes);
3890                 }
3891
3892                 if (sysctl_admin_reserve_kbytes > free_kbytes) {
3893                         init_admin_reserve();
3894                         pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3895                                 sysctl_admin_reserve_kbytes);
3896                 }
3897                 break;
3898         default:
3899                 break;
3900         }
3901         return NOTIFY_OK;
3902 }
3903
3904 static int __meminit init_reserve_notifier(void)
3905 {
3906         if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
3907                 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3908
3909         return 0;
3910 }
3911 subsys_initcall(init_reserve_notifier);