/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code        <alan@lxorguk.ukuu.org.uk>
 */

#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)       (0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)              (addr)
#endif

static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);

/*
 * WARNING: the debugging will use recursive algorithms so never enable this
 * unless you know what you are doing.
 */
#undef DEBUG_MM_RB

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return __pgprot(pgprot_val(protection_map[vm_flags &
                                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
                        pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);
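
/*
 * Worked example (editor's note): with the usual bit values
 * VM_READ=0x1, VM_WRITE=0x2, VM_EXEC=0x4, VM_SHARED=0x8, a shared
 * read-write mapping has (vm_flags & 0xf) == 0xb, so it indexes
 * protection_map[11] == __S011, the shared read/write entry in
 * the table above.
 */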

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
/*
 * Make sure vm_committed_as is on its own cacheline and not shared with
 * other variables. It can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        unsigned long free, allowed;

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_page_state(NR_FREE_PAGES);
                free += global_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case, they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_page_state(NR_SHMEM);

                free += nr_swap_pages;

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
                free += global_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Leave out reserved pages: they are not available
                 * for anonymous mappings.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Leave the last 3% for root
                 */
                if (!cap_sys_admin)
                        free -= free / 32;

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = (totalram_pages - hugetlb_total_pages())
                * sysctl_overcommit_ratio / 100;
        /*
         * Leave the last 3% for root
         */
        if (!cap_sys_admin)
                allowed -= allowed / 32;
        allowed += total_swap_pages;

        /* Don't let a single process grow too big:
           leave 3% of the size of this process for other processes */
        if (mm)
                allowed -= mm->total_vm / 32;

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}
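
/*
 * Worked example for OVERCOMMIT_NEVER (editor's sketch, numbers
 * hypothetical): with 1000000 non-hugetlb pages, no swap, and the
 * default sysctl_overcommit_ratio of 50, allowed = 1000000 * 50 /
 * 100 = 500000 pages, further reduced by ~3% (allowed/32) for
 * callers without cap_sys_admin.  A request that would push
 * vm_committed_as past that figure fails with -ENOMEM.
 */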

/*
 * Requires inode->i_mapping->i_mmap_mutex
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
{
        if (vma->vm_flags & VM_DENYWRITE)
                atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;

        flush_dcache_mmap_lock(mapping);
        if (unlikely(vma->vm_flags & VM_NONLINEAR))
                list_del_init(&vma->shared.nonlinear);
        else
                vma_interval_tree_remove(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;

        if (file) {
                struct address_space *mapping = file->f_mapping;
                mutex_lock(&mapping->i_mmap_mutex);
                __remove_shared_vm_struct(vma, file, mapping);
                mutex_unlock(&mapping->i_mmap_mutex);
        }
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *next = vma->vm_next;

        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_put(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
}

static unsigned long do_brk(unsigned long addr, unsigned long len);

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
        unsigned long min_brk;

        down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
        /*
         * CONFIG_COMPAT_BRK can still be overridden by setting
         * randomize_va_space to 2, which will still cause mm->start_brk
         * to be arbitrarily shifted
         */
        if (current->brk_randomized)
                min_brk = mm->start_brk;
        else
                min_brk = mm->end_data;
#else
        min_brk = mm->start_brk;
#endif
        if (brk < min_brk)
                goto out;

        /*
         * Check against rlimit here. If this check is done later after the test
         * of oldbrk with newbrk then it can escape the test and let the data
         * segment grow beyond its set limit in the case where the limit is
         * not page aligned -Ram Gupta
         */
        rlim = rlimit(RLIMIT_DATA);
        if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
                        (mm->end_data - mm->start_data) > rlim)
                goto out;

        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
                goto set_brk;

        /* Always allow shrinking brk. */
        if (brk <= mm->brk) {
                if (!do_munmap(mm, newbrk, oldbrk-newbrk))
                        goto set_brk;
                goto out;
        }

        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;

        /* Ok, looks good - let it rip. */
        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
                goto out;
set_brk:
        mm->brk = brk;
out:
        retval = mm->brk;
        up_write(&mm->mmap_sem);
        return retval;
}
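
/*
 * Editor's note: a userspace sbrk(4096) typically arrives here (via
 * glibc) as brk(mm->brk + 4096).  Growth is refused if the new break
 * would cross RLIMIT_DATA or collide with an existing mapping, and
 * since the syscall always returns the resulting mm->brk, a caller
 * detects failure by seeing an unchanged break value.
 */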

#ifdef DEBUG_MM_RB
static int browse_rb(struct rb_root *root)
{
        int i = 0, j;
        struct rb_node *nd, *pn = NULL;
        unsigned long prev = 0, pend = 0;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev)
                        printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
                if (vma->vm_start < pend)
                        printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
                if (vma->vm_start > vma->vm_end)
                        printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
                i++;
                pn = nd;
                prev = vma->vm_start;
                pend = vma->vm_end;
        }
        j = 0;
        for (nd = pn; nd; nd = rb_prev(nd)) {
                j++;
        }
        if (i != j)
                printk("backwards %d, forwards %d\n", j, i), i = 0;
        return i;
}

void validate_mm(struct mm_struct *mm)
{
        int bug = 0;
        int i = 0;
        struct vm_area_struct *tmp = mm->mmap;
        while (tmp) {
                tmp = tmp->vm_next;
                i++;
        }
        if (i != mm->map_count)
                printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count)
                printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
        BUG_ON(bug);
}
#else
#define validate_mm(mm) do { } while (0)
#endif

static int find_vma_links(struct mm_struct *mm, unsigned long addr,
                unsigned long end, struct vm_area_struct **pprev,
                struct rb_node ***rb_link, struct rb_node **rb_parent)
{
        struct rb_node **__rb_link, *__rb_parent, *rb_prev;

        __rb_link = &mm->mm_rb.rb_node;
        rb_prev = __rb_parent = NULL;

        while (*__rb_link) {
                struct vm_area_struct *vma_tmp;

                __rb_parent = *__rb_link;
                vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

                if (vma_tmp->vm_end > addr) {
                        /* Fail if an existing vma overlaps the area */
                        if (vma_tmp->vm_start < end)
                                return -ENOMEM;
                        __rb_link = &__rb_parent->rb_left;
                } else {
                        rb_prev = __rb_parent;
                        __rb_link = &__rb_parent->rb_right;
                }
        }

        *pprev = NULL;
        if (rb_prev)
                *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
        *rb_link = __rb_link;
        *rb_parent = __rb_parent;
        return 0;
}
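
/*
 * Editor's note: on success, *rb_link points at the empty rb-tree
 * slot where a vma spanning [addr, end) should be linked,
 * *rb_parent at the future parent node, and *pprev at the vma that
 * will precede the new one in the mm->mmap list (NULL if it will
 * be first).  -ENOMEM here means an existing vma overlaps the range.
 */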

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
                struct rb_node **rb_link, struct rb_node *rb_parent)
{
        rb_link_node(&vma->vm_rb, rb_parent, rb_link);
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
}

static void __vma_link_file(struct vm_area_struct *vma)
{
        struct file *file;

        file = vma->vm_file;
        if (file) {
                struct address_space *mapping = file->f_mapping;

                if (vma->vm_flags & VM_DENYWRITE)
                        atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;

                flush_dcache_mmap_lock(mapping);
                if (unlikely(vma->vm_flags & VM_NONLINEAR))
                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                else
                        vma_interval_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
        }
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *prev, struct rb_node **rb_link,
        struct rb_node *rb_parent)
{
        __vma_link_list(mm, vma, prev, rb_parent);
        __vma_link_rb(mm, vma, rb_link, rb_parent);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct vm_area_struct *prev, struct rb_node **rb_link,
                        struct rb_node *rb_parent)
{
        struct address_space *mapping = NULL;

        if (vma->vm_file)
                mapping = vma->vm_file->f_mapping;

        if (mapping)
                mutex_lock(&mapping->i_mmap_mutex);

        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);

        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);

        mm->map_count++;
        validate_mm(mm);
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree.  It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
        struct vm_area_struct *prev;
        struct rb_node **rb_link, *rb_parent;

        if (find_vma_links(mm, vma->vm_start, vma->vm_end,
                           &prev, &rb_link, &rb_parent))
                BUG();
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        mm->map_count++;
}

static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
{
        struct vm_area_struct *next = vma->vm_next;

        prev->vm_next = next;
        if (next)
                next->vm_prev = prev;
        rb_erase(&vma->vm_rb, &mm->mm_rb);
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next = vma->vm_next;
        struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct rb_root *root = NULL;
        struct anon_vma *anon_vma = NULL;
        struct file *file = vma->vm_file;
        long adjust_next = 0;
        int remove_next = 0;

        if (next && !insert) {
                struct vm_area_struct *exporter = NULL;

                if (end >= next->vm_end) {
                        /*
                         * vma expands, overlapping all the next, and
                         * perhaps the one after too (mprotect case 6).
                         */
again:                  remove_next = 1 + (end > next->vm_end);
                        end = next->vm_end;
                        exporter = next;
                        importer = vma;
                } else if (end > next->vm_start) {
                        /*
                         * vma expands, overlapping part of the next:
                         * mprotect case 5 shifting the boundary up.
                         */
                        adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
                        exporter = next;
                        importer = vma;
                } else if (end < vma->vm_end) {
                        /*
                         * vma shrinks, and !insert tells it's not
                         * split_vma inserting another: so it must be
                         * mprotect case 4 shifting the boundary down.
                         */
                        adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
                        exporter = vma;
                        importer = next;
                }

                /*
                 * Easily overlooked: when mprotect shifts the boundary,
                 * make sure the expanding vma has anon_vma set if the
                 * shrinking vma had, to cover any anon pages imported.
                 */
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
                        if (anon_vma_clone(importer, exporter))
                                return -ENOMEM;
                        importer->anon_vma = exporter->anon_vma;
                }
        }

        if (file) {
                mapping = file->f_mapping;
                if (!(vma->vm_flags & VM_NONLINEAR)) {
                        root = &mapping->i_mmap;
                        uprobe_munmap(vma, vma->vm_start, vma->vm_end);

                        if (adjust_next)
                                uprobe_munmap(next, next->vm_start,
                                                        next->vm_end);
                }

                mutex_lock(&mapping->i_mmap_mutex);
                if (insert) {
                        /*
                         * Put into interval tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
                         * throughout; but we cannot insert into address
                         * space until vma start or end is updated.
                         */
                        __vma_link_file(insert);
                }
        }

        vma_adjust_trans_huge(vma, start, end, adjust_next);

        /*
         * When changing only vma->vm_end, we don't really need anon_vma
         * lock. This is a fairly rare case by itself, but the anon_vma
         * lock may be shared between many sibling processes.  Skipping
         * the lock for brk adjustments makes a difference sometimes.
         */
        if (vma->anon_vma && (importer || start != vma->vm_start)) {
                anon_vma = vma->anon_vma;
                VM_BUG_ON(adjust_next && next->anon_vma &&
                          anon_vma != next->anon_vma);
        } else if (adjust_next && next->anon_vma)
                anon_vma = next->anon_vma;
        if (anon_vma)
                anon_vma_lock(anon_vma);

        if (root) {
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, root);
                if (adjust_next)
                        vma_interval_tree_remove(next, root);
        }

        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
                next->vm_start += adjust_next << PAGE_SHIFT;
                next->vm_pgoff += adjust_next;
        }

        if (root) {
                if (adjust_next)
                        vma_interval_tree_insert(next, root);
                vma_interval_tree_insert(vma, root);
                flush_dcache_mmap_unlock(mapping);
        }

        if (remove_next) {
                /*
                 * vma_merge has merged next into vma, and needs
                 * us to remove next before dropping the locks.
                 */
                __vma_unlink(mm, next, vma);
                if (file)
                        __remove_shared_vm_struct(next, file, mapping);
        } else if (insert) {
                /*
                 * split_vma has split insert from vma, and needs
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
                __insert_vm_struct(mm, insert);
        }

        if (anon_vma)
                anon_vma_unlock(anon_vma);
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);

        if (root) {
                uprobe_mmap(vma);

                if (adjust_next)
                        uprobe_mmap(next);
        }

        if (remove_next) {
                if (file) {
                        uprobe_munmap(next, next->vm_start, next->vm_end);
                        fput(file);
                }
                if (next->anon_vma)
                        anon_vma_merge(vma, next);
                mm->map_count--;
                mpol_put(vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we must remove another next too. It would clutter
                 * up the code too much to do both in one go.
                 */
                if (remove_next == 2) {
                        next = vma->vm_next;
                        goto again;
                }
        }
        if (insert && file)
                uprobe_mmap(insert);

        validate_mm(mm);

        return 0;
}
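
/*
 * Editor's summary of the locking above: the file's i_mmap_mutex is
 * taken before the anon_vma lock, and both are held across the
 * interval-tree and list updates so that rmap walkers never observe
 * a half-adjusted vma.
 */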

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
{
        if (vma->vm_flags ^ vm_flags)
                return 0;
        if (vma->vm_file != file)
                return 0;
        if (vma->vm_ops && vma->vm_ops->close)
                return 0;
        return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
                                        struct anon_vma *anon_vma2,
                                        struct vm_area_struct *vma)
{
        /*
         * The list_is_singular() test avoids merging VMAs cloned from
         * parents; this improves scalability by reducing anon_vma lock
         * contention.
         */
        if ((!anon_vma1 || !anon_vma2) && (!vma ||
                list_is_singular(&vma->anon_vma_chain)))
                return 1;
        return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
        }
        return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if the same anon_vma is assigned but offsets are incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                if (vma->vm_pgoff + vm_pglen == vm_pgoff)
                        return 1;
        }
        return 0;
}
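
/*
 * Worked example (editor's note): a file-backed vma covering
 * [0x1000, 0x3000) with vm_pgoff 10 spans two 4K pages, so a new
 * mapping starting at 0x3000 can merge after it only if its pgoff
 * is 10 + 2 == 12, keeping file offsets contiguous across the join.
 */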

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy)
{
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
        int err;

        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
         */
        if (vm_flags & VM_SPECIAL)
                return NULL;

        if (prev)
                next = prev->vm_next;
        else
                next = mm->mmap;
        area = next;
        if (next && next->vm_end == end)                /* cases 6, 7, 8 */
                next = next->vm_next;

        /*
         * Can it merge with the predecessor?
         */
        if (prev && prev->vm_end == addr &&
                        mpol_equal(vma_policy(prev), policy) &&
                        can_vma_merge_after(prev, vm_flags,
                                                anon_vma, file, pgoff)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
                if (next && end == next->vm_start &&
                                mpol_equal(policy, vma_policy(next)) &&
                                can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
                        err = vma_adjust(prev, prev->vm_start,
                                next->vm_end, prev->vm_pgoff, NULL);
                } else                                  /* cases 2, 5, 7 */
                        err = vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
                if (err)
                        return NULL;
                khugepaged_enter_vma_merge(prev);
                return prev;
        }

        /*
         * Can this new request be merged in front of next?
         */
        if (next && end == next->vm_start &&
                        mpol_equal(policy, vma_policy(next)) &&
                        can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
                else                                    /* cases 3, 8 */
                        err = vma_adjust(area, addr, next->vm_end,
                                next->vm_pgoff - pglen, NULL);
                if (err)
                        return NULL;
                khugepaged_enter_vma_merge(area);
                return area;
        }

        return NULL;
}

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
        return a->vm_end == b->vm_start &&
                mpol_equal(vma_policy(a), vma_policy(b)) &&
                a->vm_file == b->vm_file &&
                !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
                b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mm_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mm_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
        if (anon_vma_compatible(a, b)) {
                struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);

                if (anon_vma && list_is_singular(&old->anon_vma_chain))
                        return anon_vma;
        }
        return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *near;

        near = vma->vm_next;
        if (!near)
                goto try_prev;

        anon_vma = reusable_anon_vma(near, vma, near);
        if (anon_vma)
                return anon_vma;
try_prev:
        near = vma->vm_prev;
        if (!near)
                goto none;

        anon_vma = reusable_anon_vma(near, near, vma);
        if (anon_vma)
                return anon_vma;
none:
        /*
         * There's no absolute need to look only at touching neighbours:
         * we could search further afield for "compatible" anon_vmas.
         * But it would probably just be a waste of time searching,
         * or lead to too many vmas hanging off the same anon_vma.
         * We're trying to allow mprotect remerging later on,
         * not trying to minimize memory used for anon_vmas.
         */
        return NULL;
}

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
                                                struct file *file, long pages)
{
        const unsigned long stack_flags
                = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

        mm->total_vm += pages;

        if (file) {
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
                        mm->exec_vm += pages;
        } else if (flags & stack_flags)
                mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
        hint &= PAGE_MASK;
        if (((void *)hint != NULL) &&
            (hint < mmap_min_addr))
                return PAGE_ALIGN(mmap_min_addr);
        return hint;
}
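
/*
 * Example (editor's note, values hypothetical): with mmap_min_addr
 * set to 64KB, a hint of 0x1000 is rounded up to 0x10000, while a
 * NULL hint passes through untouched so get_unmapped_area() can
 * choose freely.
 */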

/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff)
{
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;

        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
         * (the exception is when the underlying filesystem is noexec
         *  mounted, in which case we don't add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;

        if (!len)
                return -EINVAL;

        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);

        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        /* Too many mappings? */
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;

        /* Obtain the address to map to. We verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        addr = get_unmapped_area(file, addr, len, pgoff, flags);
        if (addr & ~PAGE_MASK)
                return addr;

        /* Do simple checking here so the lower-level routines won't have
         * to. We assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

        if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;

        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = len >> PAGE_SHIFT;
                locked += mm->locked_vm;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }

        inode = file ? file->f_path.dentry->d_inode : NULL;

        if (file) {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure we don't allow writing to an append-only
                         * file..
                         */
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure there are no mandatory locks on the file.
                         */
                        if (locks_verify_locked(inode))
                                return -EAGAIN;

                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
                        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
                                if (vm_flags & VM_EXEC)
                                        return -EPERM;
                                vm_flags &= ~VM_MAYEXEC;
                        }

                        if (!file->f_op || !file->f_op->mmap)
                                return -ENODEV;
                        break;

                default:
                        return -EINVAL;
                }
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        /*
                         * Ignore pgoff.
                         */
                        pgoff = 0;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
                        /*
                         * Set pgoff according to addr for anon_vma.
                         */
                        pgoff = addr >> PAGE_SHIFT;
                        break;
                default:
                        return -EINVAL;
                }
        }

        return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
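
/*
 * Example (editor's note): a userspace call such as
 *
 *      mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 4096);
 *
 * reaches this function with len == 8192 and pgoff == 1 (the byte
 * offset shifted right by PAGE_SHIFT), and the PROT_/MAP_ bits are
 * folded into vm_flags via calc_vm_prot_bits()/calc_vm_flag_bits()
 * before mmap_region() is called.
 */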

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
{
        struct file *file = NULL;
        unsigned long retval = -EBADF;

        if (!(flags & MAP_ANONYMOUS)) {
                audit_mmap_fd(fd, flags);
                if (unlikely(flags & MAP_HUGETLB))
                        return -EINVAL;
                file = fget(fd);
                if (!file)
                        goto out;
        } else if (flags & MAP_HUGETLB) {
                struct user_struct *user = NULL;
                /*
                 * VM_NORESERVE is used because the reservations will be
                 * taken when vm_ops->mmap() is called
                 * A dummy user value is used because we are not locking
                 * memory so no accounting is necessary
                 */
                file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
                                                VM_NORESERVE, &user,
                                                HUGETLB_ANONHUGE_INODE);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
out:
        return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
        struct mmap_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        if (a.offset & ~PAGE_MASK)
                return -EINVAL;

        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                              a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma)
{
        vm_flags_t vm_flags = vma->vm_flags;

        /* If it was private or non-writable, the write bit is already clear */
        if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
                return 0;

        /* The backer wishes to know when pages are first written to? */
        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
                return 1;

        /* The open routine did something to the protections already? */
        if (pgprot_val(vma->vm_page_prot) !=
            pgprot_val(vm_get_page_prot(vm_flags)))
                return 0;

        /* Specialty mapping? */
        if (vm_flags & VM_PFNMAP)
                return 0;

        /* Can the mapping track the dirty pages? */
        return vma->vm_file && vma->vm_file->f_mapping &&
                mapping_cap_account_dirty(vma->vm_file->f_mapping);
}
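
/*
 * Editor's note: when this returns 1, mmap_region() below maps the
 * shared writable pages with the read-only protection (vm_flags &
 * ~VM_SHARED), so the first write to each page faults and
 * page_mkwrite()/dirty accounting can run before the pte is made
 * writable.
 */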

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
        /*
         * hugetlb has its own accounting separate from the core VM
         * VM_HUGETLB may not be set yet so we cannot check for that flag.
         */
        if (file && is_file_hugepages(file))
                return 0;

        return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

unsigned long mmap_region(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long flags,
                          vm_flags_t vm_flags, unsigned long pgoff)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
        int correct_wcount = 0;
        int error;
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
        struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;

        /* Clear old maps */
        error = -ENOMEM;
munmap_back:
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                if (do_munmap(mm, addr, len))
                        return -ENOMEM;
                goto munmap_back;
        }

        /* Check against address space limit. */
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;

        /*
         * Set 'VM_NORESERVE' if we should not account for the
         * memory use of this mapping.
         */
        if ((flags & MAP_NORESERVE)) {
                /* We honor MAP_NORESERVE if allowed to overcommit */
                if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        vm_flags |= VM_NORESERVE;

                /* hugetlb applies strict overcommit unless MAP_NORESERVE */
                if (file && is_file_hugepages(file))
                        vm_flags |= VM_NORESERVE;
        }

        /*
         * Private writable mapping: check memory availability
         */
        if (accountable_mapping(file, vm_flags)) {
                charged = len >> PAGE_SHIFT;
                if (security_vm_enough_memory_mm(mm, charged))
                        return -ENOMEM;
                vm_flags |= VM_ACCOUNT;
        }

        /*
         * Can we just expand an old mapping?
         */
        vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
        if (vma)
                goto out;

        /*
         * Determine the object being mapped and call the appropriate
         * specific mapper. The address has already been validated, and
         * any overlapping mappings have been removed from the list above.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }

        vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
        INIT_LIST_HEAD(&vma->anon_vma_chain);

        error = -EINVAL;        /* when rejecting VM_GROWSDOWN|VM_GROWSUP */

        if (file) {
                if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                        goto free_vma;
                if (vm_flags & VM_DENYWRITE) {
                        error = deny_write_access(file);
                        if (error)
                                goto free_vma;
                        correct_wcount = 1;
                }
                vma->vm_file = get_file(file);
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;

                /* Can addr have changed??
                 *
                 * Answer: Yes, several device drivers can do it in their
                 *         f_op->mmap method. -DaveM
                 */
                addr = vma->vm_start;
                pgoff = vma->vm_pgoff;
                vm_flags = vma->vm_flags;
        } else if (vm_flags & VM_SHARED) {
                if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
                        goto free_vma;
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
        }

        if (vma_wants_writenotify(vma)) {
                pgprot_t pprot = vma->vm_page_prot;

                /* Can vma->vm_page_prot have changed??
                 *
                 * Answer: Yes, drivers may have changed it in their
                 *         f_op->mmap method.
                 *
                 * Ensures that vmas marked as uncached stay that way.
                 */
                vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
                if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        }

        vma_link(mm, vma, prev, rb_link, rb_parent);
        file = vma->vm_file;

        /* Once vma denies write, undo our temporary denial count */
        if (correct_wcount)
                atomic_inc(&inode->i_writecount);
out:
        perf_event_mmap(vma);

        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                if (!mlock_vma_pages_range(vma, addr, addr + len))
                        mm->locked_vm += (len >> PAGE_SHIFT);
        } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
                make_pages_present(addr, addr + len);

        if (file)
                uprobe_mmap(vma);

        return addr;

unmap_and_free_vma:
        if (correct_wcount)
                atomic_inc(&inode->i_writecount);
        vma->vm_file = NULL;
        fput(file);

        /* Undo any partial mapping done by a device driver. */
        unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
        charged = 0;
free_vma:
        kmem_cache_free(vm_area_cachep, vma);
unacct_error:
        if (charged)
                vm_unacct_memory(charged);
        return error;
}
1370
1371 /* Get an address range which is currently unmapped.
1372  * For shmat() with addr=0.
1373  *
1374  * Ugly calling convention alert:
1375  * Return value with the low bits set means error value,
1376  * ie
1377  *      if (ret & ~PAGE_MASK)
1378  *              error = ret;
1379  *
1380  * This function "knows" that -ENOMEM has the bits set.
1381  */
1382 #ifndef HAVE_ARCH_UNMAPPED_AREA
1383 unsigned long
1384 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1385                 unsigned long len, unsigned long pgoff, unsigned long flags)
1386 {
1387         struct mm_struct *mm = current->mm;
1388         struct vm_area_struct *vma;
1389         unsigned long start_addr;
1390
1391         if (len > TASK_SIZE)
1392                 return -ENOMEM;
1393
1394         if (flags & MAP_FIXED)
1395                 return addr;
1396
1397         if (addr) {
1398                 addr = PAGE_ALIGN(addr);
1399                 vma = find_vma(mm, addr);
1400                 if (TASK_SIZE - len >= addr &&
1401                     (!vma || addr + len <= vma->vm_start))
1402                         return addr;
1403         }
1404         if (len > mm->cached_hole_size) {
1405                 start_addr = addr = mm->free_area_cache;
1406         } else {
1407                 start_addr = addr = TASK_UNMAPPED_BASE;
1408                 mm->cached_hole_size = 0;
1409         }
1410
1411 full_search:
1412         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1413                 /* At this point:  (!vma || addr < vma->vm_end). */
1414                 if (TASK_SIZE - len < addr) {
1415                         /*
1416                          * Start a new search - just in case we missed
1417                          * some holes.
1418                          */
1419                         if (start_addr != TASK_UNMAPPED_BASE) {
1420                                 addr = TASK_UNMAPPED_BASE;
1421                                 start_addr = addr;
1422                                 mm->cached_hole_size = 0;
1423                                 goto full_search;
1424                         }
1425                         return -ENOMEM;
1426                 }
1427                 if (!vma || addr + len <= vma->vm_start) {
1428                         /*
1429                          * Remember the place where we stopped the search:
1430                          */
1431                         mm->free_area_cache = addr + len;
1432                         return addr;
1433                 }
1434                 if (addr + mm->cached_hole_size < vma->vm_start)
1435                         mm->cached_hole_size = vma->vm_start - addr;
1436                 addr = vma->vm_end;
1437         }
1438 }
1439 #endif
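
/*
 * Illustrative sketch (not kernel code, compiled out): how a caller
 * consumes the "low bits set means error" convention documented
 * above.  The helper name is hypothetical.
 */
#if 0
static long use_get_unmapped_area(struct file *filp, unsigned long len)
{
	unsigned long addr;

	addr = get_unmapped_area(filp, 0, len, 0, 0);
	if (addr & ~PAGE_MASK)	/* low bits set: an error such as -ENOMEM */
		return (long)addr;
	/* addr is a page-aligned, currently unmapped range of length len */
	return 0;
}
#endif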
1440
1441 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1442 {
1443         /*
1444          * Is this a new hole at the lowest possible address?
1445          */
1446         if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
1447                 mm->free_area_cache = addr;
1448 }
1449
1450 /*
1451  * This mmap-allocator allocates new areas top-down from below the
1452  * stack's low limit (the base):
1453  */
1454 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1455 unsigned long
1456 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1457                           const unsigned long len, const unsigned long pgoff,
1458                           const unsigned long flags)
1459 {
1460         struct vm_area_struct *vma;
1461         struct mm_struct *mm = current->mm;
1462         unsigned long addr = addr0, start_addr;
1463
1464         /* requested length too big for entire address space */
1465         if (len > TASK_SIZE)
1466                 return -ENOMEM;
1467
1468         if (flags & MAP_FIXED)
1469                 return addr;
1470
1471         /* requesting a specific address */
1472         if (addr) {
1473                 addr = PAGE_ALIGN(addr);
1474                 vma = find_vma(mm, addr);
1475                 if (TASK_SIZE - len >= addr &&
1476                                 (!vma || addr + len <= vma->vm_start))
1477                         return addr;
1478         }
1479
1480         /* check if free_area_cache is useful for us */
1481         if (len <= mm->cached_hole_size) {
1482                 mm->cached_hole_size = 0;
1483                 mm->free_area_cache = mm->mmap_base;
1484         }
1485
1486 try_again:
1487         /* either no address requested or can't fit in requested address hole */
1488         start_addr = addr = mm->free_area_cache;
1489
1490         if (addr < len)
1491                 goto fail;
1492
1493         addr -= len;
1494         do {
1495                 /*
1496                  * Lookup failure means no vma is above this address,
1497                  * else if new region fits below vma->vm_start,
1498                  * return with success:
1499                  */
1500                 vma = find_vma(mm, addr);
1501                 if (!vma || addr+len <= vma->vm_start)
1502                         /* remember the address as a hint for next time */
1503                         return (mm->free_area_cache = addr);
1504
1505                 /* remember the largest hole we saw so far */
1506                 if (addr + mm->cached_hole_size < vma->vm_start)
1507                         mm->cached_hole_size = vma->vm_start - addr;
1508
1509                 /* try just below the current vma->vm_start */
1510                 addr = vma->vm_start-len;
1511         } while (len < vma->vm_start);
1512
1513 fail:
1514         /*
1515          * If the hint left us with no room for the requested
1516          * mapping, then try again:
1517          *
1518          * Note: this differs from the bottom-up case, which does a
1519          * full linear search; we use find_vma() here, which can
1520          * skip over some holes.
1521          */
1522         if (start_addr != mm->mmap_base) {
1523                 mm->free_area_cache = mm->mmap_base;
1524                 mm->cached_hole_size = 0;
1525                 goto try_again;
1526         }
1527
1528         /*
1529          * A failed mmap() very likely causes application failure,
1530          * so fall back to the bottom-up function here. This scenario
1531          * can happen with large stack limits and large mmap()
1532          * allocations.
1533          */
1534         mm->cached_hole_size = ~0UL;
1535         mm->free_area_cache = TASK_UNMAPPED_BASE;
1536         addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1537         /*
1538          * Restore the topdown base:
1539          */
1540         mm->free_area_cache = mm->mmap_base;
1541         mm->cached_hole_size = ~0UL;
1542
1543         return addr;
1544 }
1545 #endif
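
/*
 * Worked example (illustrative): with free_area_cache == mmap_base
 * and a 16 KiB request, the loop above first tries
 * addr = mmap_base - 16K; each conflicting vma pulls addr down to
 * vma->vm_start - 16K until the request fits below some vm_start.
 */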
1546
1547 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1548 {
1549         /*
1550          * Is this a new hole at the highest possible address?
1551          */
1552         if (addr > mm->free_area_cache)
1553                 mm->free_area_cache = addr;
1554
1555         /* don't allow allocations above the current base */
1556         if (mm->free_area_cache > mm->mmap_base)
1557                 mm->free_area_cache = mm->mmap_base;
1558 }
1559
1560 unsigned long
1561 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1562                 unsigned long pgoff, unsigned long flags)
1563 {
1564         unsigned long (*get_area)(struct file *, unsigned long,
1565                                   unsigned long, unsigned long, unsigned long);
1566
1567         unsigned long error = arch_mmap_check(addr, len, flags);
1568         if (error)
1569                 return error;
1570
1571         /* Careful about overflows.. */
1572         if (len > TASK_SIZE)
1573                 return -ENOMEM;
1574
1575         get_area = current->mm->get_unmapped_area;
1576         if (file && file->f_op && file->f_op->get_unmapped_area)
1577                 get_area = file->f_op->get_unmapped_area;
1578         addr = get_area(file, addr, len, pgoff, flags);
1579         if (IS_ERR_VALUE(addr))
1580                 return addr;
1581
1582         if (addr > TASK_SIZE - len)
1583                 return -ENOMEM;
1584         if (addr & ~PAGE_MASK)
1585                 return -EINVAL;
1586
1587         addr = arch_rebalance_pgtables(addr, len);
1588         error = security_mmap_addr(addr);
1589         return error ? error : addr;
1590 }
1591
1592 EXPORT_SYMBOL(get_unmapped_area);
1593
1594 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1595 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1596 {
1597         struct vm_area_struct *vma = NULL;
1598
1599         if (WARN_ON_ONCE(!mm))          /* Remove this in linux-3.6 */
1600                 return NULL;
1601
1602         /* Check the cache first. */
1603         /* (Cache hit rate is typically around 35%.) */
1604         vma = mm->mmap_cache;
1605         if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1606                 struct rb_node *rb_node;
1607
1608                 rb_node = mm->mm_rb.rb_node;
1609                 vma = NULL;
1610
1611                 while (rb_node) {
1612                         struct vm_area_struct *vma_tmp;
1613
1614                         vma_tmp = rb_entry(rb_node,
1615                                            struct vm_area_struct, vm_rb);
1616
1617                         if (vma_tmp->vm_end > addr) {
1618                                 vma = vma_tmp;
1619                                 if (vma_tmp->vm_start <= addr)
1620                                         break;
1621                                 rb_node = rb_node->rb_left;
1622                         } else
1623                                 rb_node = rb_node->rb_right;
1624                 }
1625                 if (vma)
1626                         mm->mmap_cache = vma;
1627         }
1628         return vma;
1629 }
1630
1631 EXPORT_SYMBOL(find_vma);
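
/*
 * Illustrative sketch (not kernel code, compiled out): find_vma()
 * semantically returns the first vma on the sorted list with
 * addr < vm_end; the rbtree walk above is the O(log n) form of this
 * O(n) loop.
 */
#if 0
static struct vm_area_struct *find_vma_linear(struct mm_struct *mm,
					      unsigned long addr)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (addr < vma->vm_end)
			return vma;	/* may begin above addr: addr was in a hole */
	return NULL;
}
#endif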
1632
1633 /*
1634  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
1635  */
1636 struct vm_area_struct *
1637 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1638                         struct vm_area_struct **pprev)
1639 {
1640         struct vm_area_struct *vma;
1641
1642         vma = find_vma(mm, addr);
1643         if (vma) {
1644                 *pprev = vma->vm_prev;
1645         } else {
1646                 struct rb_node *rb_node = mm->mm_rb.rb_node;
1647                 *pprev = NULL;
1648                 while (rb_node) {
1649                         *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1650                         rb_node = rb_node->rb_right;
1651                 }
1652         }
1653         return vma;
1654 }
1655
1656 /*
1657  * Verify that the stack growth is acceptable and
1658  * update accounting. This is shared with both the
1659  * grow-up and grow-down cases.
1660  */
1661 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
1662 {
1663         struct mm_struct *mm = vma->vm_mm;
1664         struct rlimit *rlim = current->signal->rlim;
1665         unsigned long new_start;
1666
1667         /* address space limit tests */
1668         if (!may_expand_vm(mm, grow))
1669                 return -ENOMEM;
1670
1671         /* Stack limit test */
1672         if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
1673                 return -ENOMEM;
1674
1675         /* mlock limit tests */
1676         if (vma->vm_flags & VM_LOCKED) {
1677                 unsigned long locked;
1678                 unsigned long limit;
1679                 locked = mm->locked_vm + grow;
1680                 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
1681                 limit >>= PAGE_SHIFT;
1682                 if (locked > limit && !capable(CAP_IPC_LOCK))
1683                         return -ENOMEM;
1684         }
1685
1686         /* Check to ensure the stack will not grow into a hugetlb-only region */
1687         new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1688                         vma->vm_end - size;
1689         if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1690                 return -EFAULT;
1691
1692         /*
1693          * Overcommit..  This must be the final test, as it will
1694          * update security statistics.
1695          */
1696         if (security_vm_enough_memory_mm(mm, grow))
1697                 return -ENOMEM;
1698
1699         /* Ok, everything looks good - let it rip */
1700         if (vma->vm_flags & VM_LOCKED)
1701                 mm->locked_vm += grow;
1702         vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1703         return 0;
1704 }
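
/*
 * Worked example (illustrative): with RLIMIT_MEMLOCK = 64 KiB and
 * 4 KiB pages, the mlock test above compares locked (in pages)
 * against 64K >> PAGE_SHIFT = 16 pages; CAP_IPC_LOCK bypasses it.
 */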
1705
1706 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1707 /*
1708  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1709  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1710  */
1711 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1712 {
1713         int error;
1714
1715         if (!(vma->vm_flags & VM_GROWSUP))
1716                 return -EFAULT;
1717
1718         /*
1719          * We must make sure the anon_vma is allocated
1720          * so that the anon_vma locking is not a noop.
1721          */
1722         if (unlikely(anon_vma_prepare(vma)))
1723                 return -ENOMEM;
1724         vma_lock_anon_vma(vma);
1725
1726         /*
1727          * vma->vm_start/vm_end cannot change under us because the caller
1728          * is required to hold the mmap_sem in read mode.  We need the
1729          * anon_vma lock to serialize against concurrent expand_stacks.
1730          * Also guard against wrapping around to address 0.
1731          */
1732         if (address < PAGE_ALIGN(address+4))
1733                 address = PAGE_ALIGN(address+4);
1734         else {
1735                 vma_unlock_anon_vma(vma);
1736                 return -ENOMEM;
1737         }
1738         error = 0;
1739
1740         /* Somebody else might have raced and expanded it already */
1741         if (address > vma->vm_end) {
1742                 unsigned long size, grow;
1743
1744                 size = address - vma->vm_start;
1745                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1746
1747                 error = -ENOMEM;
1748                 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1749                         error = acct_stack_growth(vma, size, grow);
1750                         if (!error) {
1751                                 vma->vm_end = address;
1752                                 perf_event_mmap(vma);
1753                         }
1754                 }
1755         }
1756         vma_unlock_anon_vma(vma);
1757         khugepaged_enter_vma_merge(vma);
1758         return error;
1759 }
1760 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
1761
1762 /*
1763  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1764  */
1765 int expand_downwards(struct vm_area_struct *vma,
1766                                    unsigned long address)
1767 {
1768         int error;
1769
1770         /*
1771          * We must make sure the anon_vma is allocated
1772          * so that the anon_vma locking is not a noop.
1773          */
1774         if (unlikely(anon_vma_prepare(vma)))
1775                 return -ENOMEM;
1776
1777         address &= PAGE_MASK;
1778         error = security_mmap_addr(address);
1779         if (error)
1780                 return error;
1781
1782         vma_lock_anon_vma(vma);
1783
1784         /*
1785          * vma->vm_start/vm_end cannot change under us because the caller
1786          * is required to hold the mmap_sem in read mode.  We need the
1787          * anon_vma lock to serialize against concurrent expand_stacks.
1788          */
1789
1790         /* Somebody else might have raced and expanded it already */
1791         if (address < vma->vm_start) {
1792                 unsigned long size, grow;
1793
1794                 size = vma->vm_end - address;
1795                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1796
1797                 error = -ENOMEM;
1798                 if (grow <= vma->vm_pgoff) {
1799                         error = acct_stack_growth(vma, size, grow);
1800                         if (!error) {
1801                                 vma->vm_start = address;
1802                                 vma->vm_pgoff -= grow;
1803                                 perf_event_mmap(vma);
1804                         }
1805                 }
1806         }
1807         vma_unlock_anon_vma(vma);
1808         khugepaged_enter_vma_merge(vma);
1809         return error;
1810 }
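
/*
 * Worked example (illustrative): growing the stack down by one page
 * moves vma->vm_start down by PAGE_SIZE and decrements vma->vm_pgoff
 * by 1 (grow == 1), keeping vm_pgoff linear with the new start.
 */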
1811
1812 #ifdef CONFIG_STACK_GROWSUP
1813 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1814 {
1815         return expand_upwards(vma, address);
1816 }
1817
1818 struct vm_area_struct *
1819 find_extend_vma(struct mm_struct *mm, unsigned long addr)
1820 {
1821         struct vm_area_struct *vma, *prev;
1822
1823         addr &= PAGE_MASK;
1824         vma = find_vma_prev(mm, addr, &prev);
1825         if (vma && (vma->vm_start <= addr))
1826                 return vma;
1827         if (!prev || expand_stack(prev, addr))
1828                 return NULL;
1829         if (prev->vm_flags & VM_LOCKED) {
1830                 mlock_vma_pages_range(prev, addr, prev->vm_end);
1831         }
1832         return prev;
1833 }
1834 #else
1835 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1836 {
1837         return expand_downwards(vma, address);
1838 }
1839
1840 struct vm_area_struct *
1841 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1842 {
1843         struct vm_area_struct * vma;
1844         unsigned long start;
1845
1846         addr &= PAGE_MASK;
1847         vma = find_vma(mm,addr);
1848         if (!vma)
1849                 return NULL;
1850         if (vma->vm_start <= addr)
1851                 return vma;
1852         if (!(vma->vm_flags & VM_GROWSDOWN))
1853                 return NULL;
1854         start = vma->vm_start;
1855         if (expand_stack(vma, addr))
1856                 return NULL;
1857         if (vma->vm_flags & VM_LOCKED) {
1858                 mlock_vma_pages_range(vma, addr, start);
1859         }
1860         return vma;
1861 }
1862 #endif
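
/*
 * Illustrative sketch (not from this file): arch page-fault handlers
 * open-code roughly the same lookup as find_extend_vma() above before
 * calling handle_mm_fault().  The helper name is hypothetical.
 */
#if 0
static struct vm_area_struct *fault_lookup(struct mm_struct *mm,
					   unsigned long address)
{
	struct vm_area_struct *vma = find_vma(mm, address);

	if (!vma)
		return NULL;
	if (vma->vm_start <= address)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	return expand_stack(vma, address) ? NULL : vma;
}
#endif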
1863
1864 /*
1865  * Ok - we have the memory areas we should free on the vma list,
1866  * so release them, and do the vma updates.
1867  *
1868  * Called with the mm semaphore held.
1869  */
1870 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1871 {
1872         unsigned long nr_accounted = 0;
1873
1874         /* Update high watermark before we lower total_vm */
1875         update_hiwater_vm(mm);
1876         do {
1877                 long nrpages = vma_pages(vma);
1878
1879                 if (vma->vm_flags & VM_ACCOUNT)
1880                         nr_accounted += nrpages;
1881                 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1882                 vma = remove_vma(vma);
1883         } while (vma);
1884         vm_unacct_memory(nr_accounted);
1885         validate_mm(mm);
1886 }
1887
1888 /*
1889  * Get rid of page table information in the indicated region.
1890  *
1891  * Called with the mm semaphore held.
1892  */
1893 static void unmap_region(struct mm_struct *mm,
1894                 struct vm_area_struct *vma, struct vm_area_struct *prev,
1895                 unsigned long start, unsigned long end)
1896 {
1897         struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1898         struct mmu_gather tlb;
1899
1900         lru_add_drain();
1901         tlb_gather_mmu(&tlb, mm, 0);
1902         update_hiwater_rss(mm);
1903         unmap_vmas(&tlb, vma, start, end);
1904         free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
1905                                  next ? next->vm_start : 0);
1906         tlb_finish_mmu(&tlb, start, end);
1907 }
1908
1909 /*
1910  * Create a list of vma's touched by the unmap, removing them from the mm's
1911  * vma list as we go..
1912  */
1913 static void
1914 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1915         struct vm_area_struct *prev, unsigned long end)
1916 {
1917         struct vm_area_struct **insertion_point;
1918         struct vm_area_struct *tail_vma = NULL;
1919         unsigned long addr;
1920
1921         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1922         vma->vm_prev = NULL;
1923         do {
1924                 rb_erase(&vma->vm_rb, &mm->mm_rb);
1925                 mm->map_count--;
1926                 tail_vma = vma;
1927                 vma = vma->vm_next;
1928         } while (vma && vma->vm_start < end);
1929         *insertion_point = vma;
1930         if (vma)
1931                 vma->vm_prev = prev;
1932         tail_vma->vm_next = NULL;
1933         if (mm->unmap_area == arch_unmap_area)
1934                 addr = prev ? prev->vm_end : mm->mmap_base;
1935         else
1936                 addr = vma ?  vma->vm_start : mm->mmap_base;
1937         mm->unmap_area(mm, addr);
1938         mm->mmap_cache = NULL;          /* Kill the cache. */
1939 }
1940
1941 /*
1942  * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
1943  * munmap path where it doesn't make sense to fail.
1944  */
1945 static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1946               unsigned long addr, int new_below)
1947 {
1948         struct mempolicy *pol;
1949         struct vm_area_struct *new;
1950         int err = -ENOMEM;
1951
1952         if (is_vm_hugetlb_page(vma) && (addr &
1953                                         ~(huge_page_mask(hstate_vma(vma)))))
1954                 return -EINVAL;
1955
1956         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1957         if (!new)
1958                 goto out_err;
1959
1960         /* most fields are the same, copy all, and then fixup */
1961         *new = *vma;
1962
1963         INIT_LIST_HEAD(&new->anon_vma_chain);
1964
1965         if (new_below)
1966                 new->vm_end = addr;
1967         else {
1968                 new->vm_start = addr;
1969                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1970         }
1971
1972         pol = mpol_dup(vma_policy(vma));
1973         if (IS_ERR(pol)) {
1974                 err = PTR_ERR(pol);
1975                 goto out_free_vma;
1976         }
1977         vma_set_policy(new, pol);
1978
1979         if (anon_vma_clone(new, vma))
1980                 goto out_free_mpol;
1981
1982         if (new->vm_file)
1983                 get_file(new->vm_file);
1984
1985         if (new->vm_ops && new->vm_ops->open)
1986                 new->vm_ops->open(new);
1987
1988         if (new_below)
1989                 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1990                         ((addr - new->vm_start) >> PAGE_SHIFT), new);
1991         else
1992                 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1993
1994         /* Success. */
1995         if (!err)
1996                 return 0;
1997
1998         /* Clean everything up if vma_adjust failed. */
1999         if (new->vm_ops && new->vm_ops->close)
2000                 new->vm_ops->close(new);
2001         if (new->vm_file)
2002                 fput(new->vm_file);
2003         unlink_anon_vmas(new);
2004  out_free_mpol:
2005         mpol_put(pol);
2006  out_free_vma:
2007         kmem_cache_free(vm_area_cachep, new);
2008  out_err:
2009         return err;
2010 }
2011
2012 /*
2013  * Split a vma into two pieces at address 'addr', a new vma is allocated
2014  * either for the first part or the tail.
2015  */
2016 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2017               unsigned long addr, int new_below)
2018 {
2019         if (mm->map_count >= sysctl_max_map_count)
2020                 return -ENOMEM;
2021
2022         return __split_vma(mm, vma, addr, new_below);
2023 }
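
/*
 * Worked example (illustrative): splitting [0x400000, 0x600000) at
 * 0x500000 with new_below == 0 leaves the old vma as
 * [0x400000, 0x500000) and creates [0x500000, 0x600000), whose
 * vm_pgoff is advanced by (0x100000 >> PAGE_SHIFT).
 */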
2024
2025 /* Munmap is split into 2 main parts -- this part, which finds
2026  * what needs doing, and the helpers above (unmap_region() and
2027  * friends), which do the work.  This now handles partial unmappings.
2028  * Jeremy Fitzhardinge <jeremy@goop.org>
2029  */
2030 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2031 {
2032         unsigned long end;
2033         struct vm_area_struct *vma, *prev, *last;
2034
2035         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2036                 return -EINVAL;
2037
2038         if ((len = PAGE_ALIGN(len)) == 0)
2039                 return -EINVAL;
2040
2041         /* Find the first overlapping VMA */
2042         vma = find_vma(mm, start);
2043         if (!vma)
2044                 return 0;
2045         prev = vma->vm_prev;
2046         /* we have  start < vma->vm_end  */
2047
2048         /* if it doesn't overlap, we have nothing.. */
2049         end = start + len;
2050         if (vma->vm_start >= end)
2051                 return 0;
2052
2053         /*
2054          * If we need to split any vma, do it now to save pain later.
2055          *
2056          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2057          * unmapped vm_area_struct will remain in use: so lower split_vma
2058          * places tmp vma above, and higher split_vma places tmp vma below.
2059          */
2060         if (start > vma->vm_start) {
2061                 int error;
2062
2063                 /*
2064                  * Make sure that map_count on return from munmap() will
2065                  * not exceed its limit; but let map_count go just above
2066                  * its limit temporarily, to help free resources as expected.
2067                  */
2068                 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2069                         return -ENOMEM;
2070
2071                 error = __split_vma(mm, vma, start, 0);
2072                 if (error)
2073                         return error;
2074                 prev = vma;
2075         }
2076
2077         /* Does it split the last one? */
2078         last = find_vma(mm, end);
2079         if (last && end > last->vm_start) {
2080                 int error = __split_vma(mm, last, end, 1);
2081                 if (error)
2082                         return error;
2083         }
2084         vma = prev? prev->vm_next: mm->mmap;
2085
2086         /*
2087          * unlock any mlock()ed ranges before detaching vmas
2088          */
2089         if (mm->locked_vm) {
2090                 struct vm_area_struct *tmp = vma;
2091                 while (tmp && tmp->vm_start < end) {
2092                         if (tmp->vm_flags & VM_LOCKED) {
2093                                 mm->locked_vm -= vma_pages(tmp);
2094                                 munlock_vma_pages_all(tmp);
2095                         }
2096                         tmp = tmp->vm_next;
2097                 }
2098         }
2099
2100         /*
2101          * Remove the vma's, and unmap the actual pages
2102          */
2103         detach_vmas_to_be_unmapped(mm, vma, prev, end);
2104         unmap_region(mm, vma, prev, start, end);
2105
2106         /* Fix up all other VM information */
2107         remove_vma_list(mm, vma);
2108
2109         return 0;
2110 }
2111
2112 int vm_munmap(unsigned long start, size_t len)
2113 {
2114         int ret;
2115         struct mm_struct *mm = current->mm;
2116
2117         down_write(&mm->mmap_sem);
2118         ret = do_munmap(mm, start, len);
2119         up_write(&mm->mmap_sem);
2120         return ret;
2121 }
2122 EXPORT_SYMBOL(vm_munmap);
2123
2124 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2125 {
2126         profile_munmap(addr);
2127         return vm_munmap(addr, len);
2128 }
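
/*
 * Illustrative userspace sketch (not kernel code, compiled out):
 * unmapping the middle page of a three-page anonymous mapping takes
 * both __split_vma() paths in do_munmap() above.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>

static int punch_hole_demo(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return -1;
	/* leaves [p, p+page) and [p+2*page, p+3*page) mapped */
	return munmap(p + page, page);
}
#endif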
2129
2130 static inline void verify_mm_writelocked(struct mm_struct *mm)
2131 {
2132 #ifdef CONFIG_DEBUG_VM
2133         if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2134                 WARN_ON(1);
2135                 up_read(&mm->mmap_sem);
2136         }
2137 #endif
2138 }
2139
2140 /*
2141  *  This is really a simplified "do_mmap": it only handles
2142  *  anonymous maps.  Eventually we may be able to do some
2143  *  brk-specific accounting here.
2144  */
2145 static unsigned long do_brk(unsigned long addr, unsigned long len)
2146 {
2147         struct mm_struct * mm = current->mm;
2148         struct vm_area_struct * vma, * prev;
2149         unsigned long flags;
2150         struct rb_node ** rb_link, * rb_parent;
2151         pgoff_t pgoff = addr >> PAGE_SHIFT;
2152         int error;
2153
2154         len = PAGE_ALIGN(len);
2155         if (!len)
2156                 return addr;
2157
2158         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2159
2160         error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2161         if (error & ~PAGE_MASK)
2162                 return error;
2163
2164         /*
2165          * mlock MCL_FUTURE?
2166          */
2167         if (mm->def_flags & VM_LOCKED) {
2168                 unsigned long locked, lock_limit;
2169                 locked = len >> PAGE_SHIFT;
2170                 locked += mm->locked_vm;
2171                 lock_limit = rlimit(RLIMIT_MEMLOCK);
2172                 lock_limit >>= PAGE_SHIFT;
2173                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2174                         return -EAGAIN;
2175         }
2176
2177         /*
2178          * mm->mmap_sem is required to protect against another thread
2179          * changing the mappings in case we sleep.
2180          */
2181         verify_mm_writelocked(mm);
2182
2183         /*
2184          * Clear old maps.  this also does some error checking for us
2185          */
2186  munmap_back:
2187         if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
2188                 if (do_munmap(mm, addr, len))
2189                         return -ENOMEM;
2190                 goto munmap_back;
2191         }
2192
2193         /* Check against address space limits *after* clearing old maps... */
2194         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2195                 return -ENOMEM;
2196
2197         if (mm->map_count > sysctl_max_map_count)
2198                 return -ENOMEM;
2199
2200         if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2201                 return -ENOMEM;
2202
2203         /* Can we just expand an old private anonymous mapping? */
2204         vma = vma_merge(mm, prev, addr, addr + len, flags,
2205                                         NULL, NULL, pgoff, NULL);
2206         if (vma)
2207                 goto out;
2208
2209         /*
2210          * create a vma struct for an anonymous mapping
2211          */
2212         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2213         if (!vma) {
2214                 vm_unacct_memory(len >> PAGE_SHIFT);
2215                 return -ENOMEM;
2216         }
2217
2218         INIT_LIST_HEAD(&vma->anon_vma_chain);
2219         vma->vm_mm = mm;
2220         vma->vm_start = addr;
2221         vma->vm_end = addr + len;
2222         vma->vm_pgoff = pgoff;
2223         vma->vm_flags = flags;
2224         vma->vm_page_prot = vm_get_page_prot(flags);
2225         vma_link(mm, vma, prev, rb_link, rb_parent);
2226 out:
2227         perf_event_mmap(vma);
2228         mm->total_vm += len >> PAGE_SHIFT;
2229         if (flags & VM_LOCKED) {
2230                 if (!mlock_vma_pages_range(vma, addr, addr + len))
2231                         mm->locked_vm += (len >> PAGE_SHIFT);
2232         }
2233         return addr;
2234 }
2235
2236 unsigned long vm_brk(unsigned long addr, unsigned long len)
2237 {
2238         struct mm_struct *mm = current->mm;
2239         unsigned long ret;
2240
2241         down_write(&mm->mmap_sem);
2242         ret = do_brk(addr, len);
2243         up_write(&mm->mmap_sem);
2244         return ret;
2245 }
2246 EXPORT_SYMBOL(vm_brk);
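
/*
 * Illustrative userspace sketch (not kernel code, compiled out):
 * sys_brk() expands the heap through this same anonymous-mapping
 * path; sbrk() is the libc wrapper around it.
 */
#if 0
#include <unistd.h>

static int heap_demo(void)
{
	char *old = sbrk(0);			/* current program break */

	if (sbrk(4096) == (void *)-1)		/* grow the heap by one page */
		return -1;
	return (char *)sbrk(0) > old ? 0 : -1;
}
#endif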
2247
2248 /* Release all mmaps. */
2249 void exit_mmap(struct mm_struct *mm)
2250 {
2251         struct mmu_gather tlb;
2252         struct vm_area_struct *vma;
2253         unsigned long nr_accounted = 0;
2254
2255         /* mm's last user has gone, and it's about to be pulled down */
2256         mmu_notifier_release(mm);
2257
2258         if (mm->locked_vm) {
2259                 vma = mm->mmap;
2260                 while (vma) {
2261                         if (vma->vm_flags & VM_LOCKED)
2262                                 munlock_vma_pages_all(vma);
2263                         vma = vma->vm_next;
2264                 }
2265         }
2266
2267         arch_exit_mmap(mm);
2268
2269         vma = mm->mmap;
2270         if (!vma)       /* Can happen if dup_mmap() received an OOM */
2271                 return;
2272
2273         lru_add_drain();
2274         flush_cache_mm(mm);
2275         tlb_gather_mmu(&tlb, mm, 1);
2276         /* update_hiwater_rss(mm) here? but nobody should be looking */
2277         /* Use -1 here to ensure all VMAs in the mm are unmapped */
2278         unmap_vmas(&tlb, vma, 0, -1);
2279
2280         free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
2281         tlb_finish_mmu(&tlb, 0, -1);
2282
2283         /*
2284          * Walk the list again, actually closing and freeing it,
2285          * with preemption enabled, without holding any MM locks.
2286          */
2287         while (vma) {
2288                 if (vma->vm_flags & VM_ACCOUNT)
2289                         nr_accounted += vma_pages(vma);
2290                 vma = remove_vma(vma);
2291         }
2292         vm_unacct_memory(nr_accounted);
2293
2294         WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2295 }
2296
2297 /* Insert vm structure into process list sorted by address
2298  * and into the inode's i_mmap tree.  If vm_file is non-NULL
2299  * then i_mmap_mutex is taken here.
2300  */
2301 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
2302 {
2303         struct vm_area_struct *prev;
2304         struct rb_node **rb_link, *rb_parent;
2305
2306         /*
2307          * The vm_pgoff of a purely anonymous vma should be irrelevant
2308          * until its first write fault, when page's anon_vma and index
2309          * are set.  But now set the vm_pgoff it will almost certainly
2310          * end up with (unless mremap moves it elsewhere before that
2311          * first write fault), so /proc/pid/maps tells a consistent story.
2312          *
2313          * By setting it to reflect the virtual start address of the
2314          * vma, merges and splits can happen in a seamless way, just
2315          * using the existing file pgoff checks and manipulations.
2316          * Similarly in do_mmap_pgoff and in do_brk.
2317          */
2318         if (!vma->vm_file) {
2319                 BUG_ON(vma->anon_vma);
2320                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2321         }
2322         if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2323                            &prev, &rb_link, &rb_parent))
2324                 return -ENOMEM;
2325         if ((vma->vm_flags & VM_ACCOUNT) &&
2326              security_vm_enough_memory_mm(mm, vma_pages(vma)))
2327                 return -ENOMEM;
2328
2329         vma_link(mm, vma, prev, rb_link, rb_parent);
2330         return 0;
2331 }
2332
2333 /*
2334  * Copy the vma structure to a new location in the same mm,
2335  * prior to moving page table entries, to effect an mremap move.
2336  */
2337 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2338         unsigned long addr, unsigned long len, pgoff_t pgoff)
2339 {
2340         struct vm_area_struct *vma = *vmap;
2341         unsigned long vma_start = vma->vm_start;
2342         struct mm_struct *mm = vma->vm_mm;
2343         struct vm_area_struct *new_vma, *prev;
2344         struct rb_node **rb_link, *rb_parent;
2345         struct mempolicy *pol;
2346         bool faulted_in_anon_vma = true;
2347
2348         /*
2349          * If anonymous vma has not yet been faulted, update new pgoff
2350          * to match new location, to increase its chance of merging.
2351          */
2352         if (unlikely(!vma->vm_file && !vma->anon_vma)) {
2353                 pgoff = addr >> PAGE_SHIFT;
2354                 faulted_in_anon_vma = false;
2355         }
2356
2357         if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2358                 return NULL;    /* should never get here */
2359         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2360                         vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2361         if (new_vma) {
2362                 /*
2363                  * Source vma may have been merged into new_vma
2364                  */
2365                 if (unlikely(vma_start >= new_vma->vm_start &&
2366                              vma_start < new_vma->vm_end)) {
2367                         /*
2368                          * The only way we can get a vma_merge with
2369                          * self during an mremap is if the vma hasn't
2370                          * been faulted in yet and we were allowed to
2371                          * reset the dst vma->vm_pgoff to the
2372                          * destination address of the mremap to allow
2373                          * the merge to happen. mremap must change the
2374                          * vm_pgoff linearity between src and dst vmas
2375                          * (in turn preventing a vma_merge) to be
2376                          * safe. It is only safe to keep the vm_pgoff
2377                          * linear if there are no pages mapped yet.
2378                          */
2379                         VM_BUG_ON(faulted_in_anon_vma);
2380                         *vmap = new_vma;
2381                 } else
2382                         anon_vma_moveto_tail(new_vma);
2383         } else {
2384                 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2385                 if (new_vma) {
2386                         *new_vma = *vma;
2387                         pol = mpol_dup(vma_policy(vma));
2388                         if (IS_ERR(pol))
2389                                 goto out_free_vma;
2390                         INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2391                         if (anon_vma_clone(new_vma, vma))
2392                                 goto out_free_mempol;
2393                         vma_set_policy(new_vma, pol);
2394                         new_vma->vm_start = addr;
2395                         new_vma->vm_end = addr + len;
2396                         new_vma->vm_pgoff = pgoff;
2397                         if (new_vma->vm_file)
2398                                 get_file(new_vma->vm_file);
2399                         if (new_vma->vm_ops && new_vma->vm_ops->open)
2400                                 new_vma->vm_ops->open(new_vma);
2401                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
2402                 }
2403         }
2404         return new_vma;
2405
2406  out_free_mempol:
2407         mpol_put(pol);
2408  out_free_vma:
2409         kmem_cache_free(vm_area_cachep, new_vma);
2410         return NULL;
2411 }
2412
2413 /*
2414  * Return true if the calling process may expand its vm space by the passed
2415  * number of pages
2416  */
2417 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2418 {
2419         unsigned long cur = mm->total_vm;       /* pages */
2420         unsigned long lim;
2421
2422         lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
2423
2424         if (cur + npages > lim)
2425                 return 0;
2426         return 1;
2427 }
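
/*
 * Worked example (illustrative): with RLIMIT_AS = 1 GiB and 4 KiB
 * pages, lim is 1G >> 12 = 262144 pages; a request that would push
 * total_vm past that makes may_expand_vm() return 0.
 */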
2428
2429
2430 static int special_mapping_fault(struct vm_area_struct *vma,
2431                                 struct vm_fault *vmf)
2432 {
2433         pgoff_t pgoff;
2434         struct page **pages;
2435
2436         /*
2437          * special mappings have no vm_file, and in that case, the mm
2438          * uses vm_pgoff internally. So we have to subtract it from here.
2439          * We are allowed to do this because we are the mm; do not copy
2440          * this code into drivers!
2441          */
2442         pgoff = vmf->pgoff - vma->vm_pgoff;
2443
2444         for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2445                 pgoff--;
2446
2447         if (*pages) {
2448                 struct page *page = *pages;
2449                 get_page(page);
2450                 vmf->page = page;
2451                 return 0;
2452         }
2453
2454         return VM_FAULT_SIGBUS;
2455 }
2456
2457 /*
2458  * Having a close hook prevents vma merging regardless of flags.
2459  */
2460 static void special_mapping_close(struct vm_area_struct *vma)
2461 {
2462 }
2463
2464 static const struct vm_operations_struct special_mapping_vmops = {
2465         .close = special_mapping_close,
2466         .fault = special_mapping_fault,
2467 };
2468
2469 /*
2470  * Called with mm->mmap_sem held for writing.
2471  * Insert a new vma covering the given region, with the given flags.
2472  * Its pages are supplied by the given array of struct page *.
2473  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2474  * The region past the last page supplied will always produce SIGBUS.
2475  * The array pointer and the pages it points to are assumed to stay alive
2476  * for as long as this mapping might exist.
2477  */
2478 int install_special_mapping(struct mm_struct *mm,
2479                             unsigned long addr, unsigned long len,
2480                             unsigned long vm_flags, struct page **pages)
2481 {
2482         int ret;
2483         struct vm_area_struct *vma;
2484
2485         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2486         if (unlikely(vma == NULL))
2487                 return -ENOMEM;
2488
2489         INIT_LIST_HEAD(&vma->anon_vma_chain);
2490         vma->vm_mm = mm;
2491         vma->vm_start = addr;
2492         vma->vm_end = addr + len;
2493
2494         vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
2495         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2496
2497         vma->vm_ops = &special_mapping_vmops;
2498         vma->vm_private_data = pages;
2499
2500         ret = insert_vm_struct(mm, vma);
2501         if (ret)
2502                 goto out;
2503
2504         mm->total_vm += len >> PAGE_SHIFT;
2505
2506         perf_event_mmap(vma);
2507
2508         return 0;
2509
2510 out:
2511         kmem_cache_free(vm_area_cachep, vma);
2512         return ret;
2513 }
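
/*
 * Illustrative sketch (not from this file): a typical caller (e.g. a
 * vdso-style mapping) keeps a static, NULL-terminated page array
 * alive for the lifetime of the mapping.  Names are hypothetical.
 */
#if 0
static struct page *demo_pages[2];	/* [0] filled at init, [1] stays NULL */

static int map_demo_page(struct mm_struct *mm, unsigned long addr)
{
	return install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC, demo_pages);
}
#endif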
2514
2515 static DEFINE_MUTEX(mm_all_locks_mutex);
2516
2517 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2518 {
2519         if (!test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
2520                 /*
2521                  * The LSB of head.next can't change from under us
2522                  * because we hold the mm_all_locks_mutex.
2523                  */
2524                 mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
2525                 /*
2526                  * We can safely modify head.next after taking the
2527                  * anon_vma->root->mutex. If some other vma in this mm shares
2528                  * the same anon_vma we won't take it again.
2529                  *
2530                  * No need for atomic instructions here, head.next
2531                  * can't change from under us thanks to the
2532                  * anon_vma->root->mutex.
2533                  */
2534                 if (__test_and_set_bit(0, (unsigned long *)
2535                                        &anon_vma->root->head.next))
2536                         BUG();
2537         }
2538 }
2539
2540 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2541 {
2542         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2543                 /*
2544                  * AS_MM_ALL_LOCKS can't change from under us because
2545                  * we hold the mm_all_locks_mutex.
2546                  *
2547                  * Operations on ->flags have to be atomic because
2548                  * even if AS_MM_ALL_LOCKS is stable thanks to the
2549                  * mm_all_locks_mutex, there may be other cpus
2550                  * changing other bitflags in parallel to us.
2551                  */
2552                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2553                         BUG();
2554                 mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
2555         }
2556 }
2557
2558 /*
2559  * This operation locks against the VM for all pte/vma/mm related
2560  * operations that could ever happen on a certain mm. This includes
2561  * vmtruncate, try_to_unmap, and all page faults.
2562  *
2563  * The caller must take the mmap_sem in write mode before calling
2564  * mm_take_all_locks(). The caller isn't allowed to release the
2565  * mmap_sem until mm_drop_all_locks() returns.
2566  *
2567  * mmap_sem in write mode is required in order to block all operations
2568  * that could modify pagetables and free pages without needing to
2569  * alter the vma layout (for example populate_range() with
2570  * nonlinear vmas). It's also needed in write mode to prevent new
2571  * anon_vmas from being associated with existing vmas.
2572  *
2573  * A single task can't take more than one mm_take_all_locks() in a row
2574  * or it would deadlock.
2575  *
2576  * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
2577  * mapping->flags avoid taking the same lock twice, if more than one
2578  * vma in this mm is backed by the same anon_vma or address_space.
2579  *
2580  * We can take all the locks in random order because the VM code
2581  * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
2582  * takes more than one of them in a row. Secondly we're protected
2583  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2584  *
2585  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2586  * that may have to take thousands of locks.
2587  *
2588  * mm_take_all_locks() can fail if it's interrupted by signals.
2589  */
2590 int mm_take_all_locks(struct mm_struct *mm)
2591 {
2592         struct vm_area_struct *vma;
2593         struct anon_vma_chain *avc;
2594
2595         BUG_ON(down_read_trylock(&mm->mmap_sem));
2596
2597         mutex_lock(&mm_all_locks_mutex);
2598
2599         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2600                 if (signal_pending(current))
2601                         goto out_unlock;
2602                 if (vma->vm_file && vma->vm_file->f_mapping)
2603                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
2604         }
2605
2606         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2607                 if (signal_pending(current))
2608                         goto out_unlock;
2609                 if (vma->anon_vma)
2610                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2611                                 vm_lock_anon_vma(mm, avc->anon_vma);
2612         }
2613
2614         return 0;
2615
2616 out_unlock:
2617         mm_drop_all_locks(mm);
2618         return -EINTR;
2619 }
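
/*
 * Illustrative call pattern (not kernel code, compiled out): per the
 * comment above, the caller holds mmap_sem for writing across the
 * whole take/drop pair.  The helper name is hypothetical.
 */
#if 0
static int with_all_locks(struct mm_struct *mm)
{
	int err;

	down_write(&mm->mmap_sem);
	err = mm_take_all_locks(mm);
	if (!err) {
		/* ... operate on every vma, anon_vma and mapping ... */
		mm_drop_all_locks(mm);
	}
	up_write(&mm->mmap_sem);
	return err;
}
#endif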
2620
2621 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2622 {
2623         if (test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
2624                 /*
2625                  * The LSB of head.next can't change to 0 from under
2626                  * us because we hold the mm_all_locks_mutex.
2627                  *
2628                  * We must however clear the bitflag before unlocking
2629                  * the vma so the users using the anon_vma->head will
2630                  * never see our bitflag.
2631                  *
2632                  * No need for atomic instructions here, head.next
2633                  * can't change from under us until we release the
2634                  * anon_vma->root->mutex.
2635                  */
2636                 if (!__test_and_clear_bit(0, (unsigned long *)
2637                                           &anon_vma->root->head.next))
2638                         BUG();
2639                 anon_vma_unlock(anon_vma);
2640         }
2641 }
2642
2643 static void vm_unlock_mapping(struct address_space *mapping)
2644 {
2645         if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2646                 /*
2647                  * AS_MM_ALL_LOCKS can't change to 0 from under us
2648                  * because we hold the mm_all_locks_mutex.
2649                  */
2650                 mutex_unlock(&mapping->i_mmap_mutex);
2651                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2652                                         &mapping->flags))
2653                         BUG();
2654         }
2655 }
2656
2657 /*
2658  * The mmap_sem cannot be released by the caller until
2659  * mm_drop_all_locks() returns.
2660  */
2661 void mm_drop_all_locks(struct mm_struct *mm)
2662 {
2663         struct vm_area_struct *vma;
2664         struct anon_vma_chain *avc;
2665
2666         BUG_ON(down_read_trylock(&mm->mmap_sem));
2667         BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2668
2669         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2670                 if (vma->anon_vma)
2671                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2672                                 vm_unlock_anon_vma(avc->anon_vma);
2673                 if (vma->vm_file && vma->vm_file->f_mapping)
2674                         vm_unlock_mapping(vma->vm_file->f_mapping);
2675         }
2676
2677         mutex_unlock(&mm_all_locks_mutex);
2678 }
2679
2680 /*
2681  * initialise the percpu counter for VM
2682  */
2683 void __init mmap_init(void)
2684 {
2685         int ret;
2686
2687         ret = percpu_counter_init(&vm_committed_as, 0);
2688         VM_BUG_ON(ret);
2689 }