1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/nommu.c
4  *
5  *  Replacement code for mm functions to support CPUs that don't
6  *  have any form of memory management unit (thus no virtual memory).
7  *
8  *  See Documentation/admin-guide/mm/nommu-mmap.rst
9  *
10  *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
11  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
12  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
13  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
14  *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/export.h>
20 #include <linux/mm.h>
21 #include <linux/sched/mm.h>
22 #include <linux/mman.h>
23 #include <linux/swap.h>
24 #include <linux/file.h>
25 #include <linux/highmem.h>
26 #include <linux/pagemap.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
29 #include <linux/backing-dev.h>
30 #include <linux/compiler.h>
31 #include <linux/mount.h>
32 #include <linux/personality.h>
33 #include <linux/security.h>
34 #include <linux/syscalls.h>
35 #include <linux/audit.h>
36 #include <linux/printk.h>
37
38 #include <linux/uaccess.h>
39 #include <asm/tlb.h>
40 #include <asm/tlbflush.h>
41 #include <asm/mmu_context.h>
42 #include "internal.h"
43
44 void *high_memory;
45 EXPORT_SYMBOL(high_memory);
46 struct page *mem_map;
47 unsigned long max_mapnr;
48 EXPORT_SYMBOL(max_mapnr);
49 unsigned long highest_memmap_pfn;
50 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
51 int heap_stack_gap = 0;
52
53 atomic_long_t mmap_pages_allocated;
54
55 EXPORT_SYMBOL(mem_map);
56
57 /* list of mapped, potentially shareable regions */
58 static struct kmem_cache *vm_region_jar;
59 struct rb_root nommu_region_tree = RB_ROOT;
60 DECLARE_RWSEM(nommu_region_sem);
61
62 const struct vm_operations_struct generic_file_vm_ops = {
63 };
64
65 /*
66  * Return the total memory allocated for this pointer, not
67  * just what the caller asked for.
68  *
69  * Doesn't have to be accurate, i.e. may have races.
70  */
71 unsigned int kobjsize(const void *objp)
72 {
73         struct page *page;
74
75         /*
76          * If the object we have should not have ksize performed on it,
77          * return size of 0
78          */
79         if (!objp || !virt_addr_valid(objp))
80                 return 0;
81
82         page = virt_to_head_page(objp);
83
84         /*
85          * If the allocator sets PageSlab, we know the pointer came from
86          * kmalloc().
87          */
88         if (PageSlab(page))
89                 return ksize(objp);
90
91         /*
92          * If it's not a compound page, see if we have a matching VMA
93          * region. This test is intentionally done in reverse order,
94          * so if there's no VMA, we still fall through and hand back
95          * PAGE_SIZE for 0-order pages.
96          */
97         if (!PageCompound(page)) {
98                 struct vm_area_struct *vma;
99
100                 vma = find_vma(current->mm, (unsigned long)objp);
101                 if (vma)
102                         return vma->vm_end - vma->vm_start;
103         }
104
105         /*
106          * The ksize() function is only guaranteed to work for pointers
107          * returned by kmalloc(). So handle arbitrary pointers here.
108          */
109         return page_size(page);
110 }
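/*
 * Illustrative sketch (hypothetical helper, for clarity only): how kobjsize()
 * can differ from the size the caller asked for.  The sizes mentioned in the
 * comments are examples; the exact slab object size depends on the configured
 * kmalloc caches.
 */
static void __maybe_unused kobjsize_example(void)
{
	void *p = kmalloc(100, GFP_KERNEL);

	if (!p)
		return;

	/*
	 * The pointer came from kmalloc(), so PageSlab() is set and
	 * kobjsize() falls through to ksize(), which typically reports the
	 * slab object size (e.g. 128) rather than the 100 bytes requested.
	 */
	pr_info("requested 100 bytes, kobjsize() reports %u\n", kobjsize(p));
	kfree(p);
}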
111
112 /**
113  * follow_pfn - look up PFN at a user virtual address
114  * @vma: memory mapping
115  * @address: user virtual address
116  * @pfn: location to store found PFN
117  *
118  * Only IO mappings and raw PFN mappings are allowed.
119  *
120  * Returns zero and the pfn at @pfn on success, -ve otherwise.
121  */
122 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
123         unsigned long *pfn)
124 {
125         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
126                 return -EINVAL;
127
128         *pfn = address >> PAGE_SHIFT;
129         return 0;
130 }
131 EXPORT_SYMBOL(follow_pfn);
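/*
 * Note for clarity: with no MMU there is no page table to walk, so for
 * VM_IO/VM_PFNMAP mappings the PFN really is just the virtual (== physical)
 * address shifted down by PAGE_SHIFT, as above.
 */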
132
133 LIST_HEAD(vmap_area_list);
134
135 void vfree(const void *addr)
136 {
137         kfree(addr);
138 }
139 EXPORT_SYMBOL(vfree);
140
141 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
142 {
143         /*
144          *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
145          * returns only a logical address.
146          */
147         return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
148 }
149 EXPORT_SYMBOL(__vmalloc);
150
151 void *__vmalloc_node_range(unsigned long size, unsigned long align,
152                 unsigned long start, unsigned long end, gfp_t gfp_mask,
153                 pgprot_t prot, unsigned long vm_flags, int node,
154                 const void *caller)
155 {
156         return __vmalloc(size, gfp_mask);
157 }
158
159 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
160                 int node, const void *caller)
161 {
162         return __vmalloc(size, gfp_mask);
163 }
164
165 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
166 {
167         void *ret;
168
169         ret = __vmalloc(size, flags);
170         if (ret) {
171                 struct vm_area_struct *vma;
172
173                 mmap_write_lock(current->mm);
174                 vma = find_vma(current->mm, (unsigned long)ret);
175                 if (vma)
176                         vma->vm_flags |= VM_USERMAP;
177                 mmap_write_unlock(current->mm);
178         }
179
180         return ret;
181 }
182
183 void *vmalloc_user(unsigned long size)
184 {
185         return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
186 }
187 EXPORT_SYMBOL(vmalloc_user);
188
189 struct page *vmalloc_to_page(const void *addr)
190 {
191         return virt_to_page(addr);
192 }
193 EXPORT_SYMBOL(vmalloc_to_page);
194
195 unsigned long vmalloc_to_pfn(const void *addr)
196 {
197         return page_to_pfn(virt_to_page(addr));
198 }
199 EXPORT_SYMBOL(vmalloc_to_pfn);
200
201 long vread(char *buf, char *addr, unsigned long count)
202 {
203         /* Don't allow overflow */
204         if ((unsigned long) buf + count < count)
205                 count = -(unsigned long) buf;
206
207         memcpy(buf, addr, count);
208         return count;
209 }
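/*
 * Worked example of the overflow clamp above (illustrative 32-bit values):
 * with buf == 0xfffff000 and count == 0x2000, buf + count wraps to 0x1000,
 * which is less than count, so count is clamped to -(unsigned long)buf ==
 * 0x1000, i.e. exactly the number of bytes left before the top of the
 * address space.  The memcpy() can therefore never wrap around.
 */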
210
211 /*
212  *      vmalloc  -  allocate virtually contiguous memory
213  *
214  *      @size:          allocation size
215  *
216  *      Allocate enough pages to cover @size from the page level
217  *      allocator and map them into contiguous kernel virtual space.
218  *
219  *      For tight control over page level allocator and protection flags
220  *      use __vmalloc() instead.
221  */
222 void *vmalloc(unsigned long size)
223 {
224         return __vmalloc(size, GFP_KERNEL);
225 }
226 EXPORT_SYMBOL(vmalloc);
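/*
 * Illustrative sketch (hypothetical helper): on !MMU, vmalloc() is just a
 * kmalloc() wrapper, so the returned buffer is physically contiguous and
 * vmalloc_to_page() reduces to virt_to_page().
 */
static void __maybe_unused nommu_vmalloc_example(void)
{
	void *p = vmalloc(2 * PAGE_SIZE);

	if (!p)
		return;

	/* both calls resolve to the same page on !MMU */
	WARN_ON(vmalloc_to_page(p) != virt_to_page(p));
	vfree(p);
}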
227
228 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
229
230 /*
231  *      vzalloc - allocate virtually contiguous memory with zero fill
232  *
233  *      @size:          allocation size
234  *
235  *      Allocate enough pages to cover @size from the page level
236  *      allocator and map them into contiguous kernel virtual space.
237  *      The memory allocated is set to zero.
238  *
239  *      For tight control over page level allocator and protection flags
240  *      use __vmalloc() instead.
241  */
242 void *vzalloc(unsigned long size)
243 {
244         return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
245 }
246 EXPORT_SYMBOL(vzalloc);
247
248 /**
249  * vmalloc_node - allocate memory on a specific node
250  * @size:       allocation size
251  * @node:       numa node
252  *
253  * Allocate enough pages to cover @size from the page level
254  * allocator and map them into contiguous kernel virtual space.
255  *
256  * For tight control over page level allocator and protection flags
257  * use __vmalloc() instead.
258  */
259 void *vmalloc_node(unsigned long size, int node)
260 {
261         return vmalloc(size);
262 }
263 EXPORT_SYMBOL(vmalloc_node);
264
265 /**
266  * vzalloc_node - allocate memory on a specific node with zero fill
267  * @size:       allocation size
268  * @node:       numa node
269  *
270  * Allocate enough pages to cover @size from the page level
271  * allocator and map them into contiguous kernel virtual space.
272  * The memory allocated is set to zero.
273  *
274  * For tight control over page level allocator and protection flags
275  * use __vmalloc() instead.
276  */
277 void *vzalloc_node(unsigned long size, int node)
278 {
279         return vzalloc(size);
280 }
281 EXPORT_SYMBOL(vzalloc_node);
282
283 /**
284  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
285  *      @size:          allocation size
286  *
287  *      Allocate enough 32bit PA addressable pages to cover @size from the
288  *      page level allocator and map them into contiguous kernel virtual space.
289  */
290 void *vmalloc_32(unsigned long size)
291 {
292         return __vmalloc(size, GFP_KERNEL);
293 }
294 EXPORT_SYMBOL(vmalloc_32);
295
296 /**
297  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
298  *      @size:          allocation size
299  *
300  * The resulting memory area is 32bit addressable and zeroed so it can be
301  * mapped to userspace without leaking data.
302  *
303  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
304  * remap_vmalloc_range() are permissible.
305  */
306 void *vmalloc_32_user(unsigned long size)
307 {
308         /*
309          * We'll have to sort out the ZONE_DMA bits for 64-bit,
310          * but for now this can simply use vmalloc_user() directly.
311          */
312         return vmalloc_user(size);
313 }
314 EXPORT_SYMBOL(vmalloc_32_user);
315
316 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
317 {
318         BUG();
319         return NULL;
320 }
321 EXPORT_SYMBOL(vmap);
322
323 void vunmap(const void *addr)
324 {
325         BUG();
326 }
327 EXPORT_SYMBOL(vunmap);
328
329 void *vm_map_ram(struct page **pages, unsigned int count, int node)
330 {
331         BUG();
332         return NULL;
333 }
334 EXPORT_SYMBOL(vm_map_ram);
335
336 void vm_unmap_ram(const void *mem, unsigned int count)
337 {
338         BUG();
339 }
340 EXPORT_SYMBOL(vm_unmap_ram);
341
342 void vm_unmap_aliases(void)
343 {
344 }
345 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
346
347 void free_vm_area(struct vm_struct *area)
348 {
349         BUG();
350 }
351 EXPORT_SYMBOL_GPL(free_vm_area);
352
353 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
354                    struct page *page)
355 {
356         return -EINVAL;
357 }
358 EXPORT_SYMBOL(vm_insert_page);
359
360 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
361                         unsigned long num)
362 {
363         return -EINVAL;
364 }
365 EXPORT_SYMBOL(vm_map_pages);
366
367 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
368                                 unsigned long num)
369 {
370         return -EINVAL;
371 }
372 EXPORT_SYMBOL(vm_map_pages_zero);
373
374 /*
375  *  sys_brk() for the most part doesn't need the global kernel
376  *  lock, except when an application is doing something nasty
377  *  like trying to un-brk an area that has already been mapped
378  *  to a regular file.  In this case, the unmapping will need
379  *  to invoke file system routines that need the global lock.
380  */
381 SYSCALL_DEFINE1(brk, unsigned long, brk)
382 {
383         struct mm_struct *mm = current->mm;
384
385         if (brk < mm->start_brk || brk > mm->context.end_brk)
386                 return mm->brk;
387
388         if (mm->brk == brk)
389                 return mm->brk;
390
391         /*
392          * Always allow shrinking brk
393          */
394         if (brk <= mm->brk) {
395                 mm->brk = brk;
396                 return brk;
397         }
398
399         /*
400          * Ok, looks good - let it rip.
401          */
402         flush_icache_user_range(mm->brk, brk);
403         return mm->brk = brk;
404 }
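/*
 * Worked example of the checks above (illustrative numbers): with
 * start_brk == 0x100000, context.end_brk == 0x108000 and the current break
 * at 0x104000, a request for 0x110000 lies outside the window reserved by
 * the binary loader and simply returns 0x104000; a request for 0x102000
 * shrinks the break; a request for 0x106000 flushes the icache over
 * 0x104000-0x106000 and returns 0x106000.  No memory is allocated or freed
 * on this path.
 */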
405
406 /*
407  * initialise the percpu counter for VM and region record slabs
408  */
409 void __init mmap_init(void)
410 {
411         int ret;
412
413         ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
414         VM_BUG_ON(ret);
415         vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
416 }
417
418 /*
419  * validate the region tree
420  * - the caller must hold the region lock
421  */
422 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
423 static noinline void validate_nommu_regions(void)
424 {
425         struct vm_region *region, *last;
426         struct rb_node *p, *lastp;
427
428         lastp = rb_first(&nommu_region_tree);
429         if (!lastp)
430                 return;
431
432         last = rb_entry(lastp, struct vm_region, vm_rb);
433         BUG_ON(last->vm_end <= last->vm_start);
434         BUG_ON(last->vm_top < last->vm_end);
435
436         while ((p = rb_next(lastp))) {
437                 region = rb_entry(p, struct vm_region, vm_rb);
438                 last = rb_entry(lastp, struct vm_region, vm_rb);
439
440                 BUG_ON(region->vm_end <= region->vm_start);
441                 BUG_ON(region->vm_top < region->vm_end);
442                 BUG_ON(region->vm_start < last->vm_top);
443
444                 lastp = p;
445         }
446 }
447 #else
448 static void validate_nommu_regions(void)
449 {
450 }
451 #endif
452
453 /*
454  * add a region into the global tree
455  */
456 static void add_nommu_region(struct vm_region *region)
457 {
458         struct vm_region *pregion;
459         struct rb_node **p, *parent;
460
461         validate_nommu_regions();
462
463         parent = NULL;
464         p = &nommu_region_tree.rb_node;
465         while (*p) {
466                 parent = *p;
467                 pregion = rb_entry(parent, struct vm_region, vm_rb);
468                 if (region->vm_start < pregion->vm_start)
469                         p = &(*p)->rb_left;
470                 else if (region->vm_start > pregion->vm_start)
471                         p = &(*p)->rb_right;
472                 else if (pregion == region)
473                         return;
474                 else
475                         BUG();
476         }
477
478         rb_link_node(&region->vm_rb, parent, p);
479         rb_insert_color(&region->vm_rb, &nommu_region_tree);
480
481         validate_nommu_regions();
482 }
483
484 /*
485  * delete a region from the global tree
486  */
487 static void delete_nommu_region(struct vm_region *region)
488 {
489         BUG_ON(!nommu_region_tree.rb_node);
490
491         validate_nommu_regions();
492         rb_erase(&region->vm_rb, &nommu_region_tree);
493         validate_nommu_regions();
494 }
495
496 /*
497  * free a contiguous series of pages
498  */
499 static void free_page_series(unsigned long from, unsigned long to)
500 {
501         for (; from < to; from += PAGE_SIZE) {
502                 struct page *page = virt_to_page((void *)from);
503
504                 atomic_long_dec(&mmap_pages_allocated);
505                 put_page(page);
506         }
507 }
508
509 /*
510  * release a reference to a region
511  * - the caller must hold the region semaphore for writing, which this releases
512  * - the region may not have been added to the tree yet, in which case vm_top
513  *   will equal vm_start
514  */
515 static void __put_nommu_region(struct vm_region *region)
516         __releases(nommu_region_sem)
517 {
518         BUG_ON(!nommu_region_tree.rb_node);
519
520         if (--region->vm_usage == 0) {
521                 if (region->vm_top > region->vm_start)
522                         delete_nommu_region(region);
523                 up_write(&nommu_region_sem);
524
525                 if (region->vm_file)
526                         fput(region->vm_file);
527
528                 /* IO memory and memory shared directly out of the pagecache
529                  * from ramfs/tmpfs mustn't be released here */
530                 if (region->vm_flags & VM_MAPPED_COPY)
531                         free_page_series(region->vm_start, region->vm_top);
532                 kmem_cache_free(vm_region_jar, region);
533         } else {
534                 up_write(&nommu_region_sem);
535         }
536 }
537
538 /*
539  * release a reference to a region
540  */
541 static void put_nommu_region(struct vm_region *region)
542 {
543         down_write(&nommu_region_sem);
544         __put_nommu_region(region);
545 }
546
547 void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
548 {
549         mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
550         mas_store_prealloc(mas, vma);
551 }
552
553 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
554 {
555         mas->index = vma->vm_start;
556         mas->last = vma->vm_end - 1;
557         mas_store_prealloc(mas, NULL);
558 }
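/*
 * Note for clarity: the maple tree stores each VMA over the inclusive index
 * range [vm_start, vm_end - 1].  A VMA covering 0x8000-0x9000 (vm_end
 * exclusive) is therefore stored with index 0x8000 and last 0x8fff, which is
 * why both helpers above subtract one from vm_end.
 */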
559
560 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
561 {
562         vma->vm_mm = mm;
563
564         /* add the VMA to the mapping */
565         if (vma->vm_file) {
566                 struct address_space *mapping = vma->vm_file->f_mapping;
567
568                 i_mmap_lock_write(mapping);
569                 flush_dcache_mmap_lock(mapping);
570                 vma_interval_tree_insert(vma, &mapping->i_mmap);
571                 flush_dcache_mmap_unlock(mapping);
572                 i_mmap_unlock_write(mapping);
573         }
574 }
575
576 /*
577  * mas_add_vma_to_mm() - Maple state variant of add_vma_to_mm().
578  * @mas: The maple state with preallocations.
579  * @mm: The mm_struct
580  * @vma: The vma to add
581  *
582  */
583 static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
584                               struct vm_area_struct *vma)
585 {
586         BUG_ON(!vma->vm_region);
587
588         setup_vma_to_mm(vma, mm);
589         mm->map_count++;
590
591         /* add the VMA to the tree */
592         vma_mas_store(vma, mas);
593 }
594
595 /*
596  * add a VMA into a process's mm_struct in the appropriate place in the list
597  * and tree, and also add it to the address space's page tree if it is not an
598  * anonymous mapping
599  * - should be called with mm->mmap_lock held writelocked
600  */
601 static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
602 {
603         MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
604
605         if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
606                 pr_warn("Allocation of vma tree for process %d failed\n",
607                        current->pid);
608                 return -ENOMEM;
609         }
610         mas_add_vma_to_mm(&mas, mm, vma);
611         return 0;
612 }
613
614 static void cleanup_vma_from_mm(struct vm_area_struct *vma)
615 {
616         vma->vm_mm->map_count--;
617         /* remove the VMA from the mapping */
618         if (vma->vm_file) {
619                 struct address_space *mapping;
620                 mapping = vma->vm_file->f_mapping;
621
622                 i_mmap_lock_write(mapping);
623                 flush_dcache_mmap_lock(mapping);
624                 vma_interval_tree_remove(vma, &mapping->i_mmap);
625                 flush_dcache_mmap_unlock(mapping);
626                 i_mmap_unlock_write(mapping);
627         }
628 }
629 /*
630  * delete a VMA from its owning mm_struct and address space
631  */
632 static int delete_vma_from_mm(struct vm_area_struct *vma)
633 {
634         MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
635
636         if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
637                 pr_warn("Allocation of vma tree for process %d failed\n",
638                        current->pid);
639                 return -ENOMEM;
640         }
641         cleanup_vma_from_mm(vma);
642
643         /* remove from the MM's tree and list */
644         vma_mas_remove(vma, &mas);
645         return 0;
646 }
647
648 /*
649  * destroy a VMA record
650  */
651 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
652 {
653         if (vma->vm_ops && vma->vm_ops->close)
654                 vma->vm_ops->close(vma);
655         if (vma->vm_file)
656                 fput(vma->vm_file);
657         put_nommu_region(vma->vm_region);
658         vm_area_free(vma);
659 }
660
661 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
662                                              unsigned long start_addr,
663                                              unsigned long end_addr)
664 {
665         unsigned long index = start_addr;
666
667         mmap_assert_locked(mm);
668         return mt_find(&mm->mm_mt, &index, end_addr - 1);
669 }
670 EXPORT_SYMBOL(find_vma_intersection);
671
672 /*
673  * look up the first VMA in which addr resides, NULL if none
674  * - should be called with mm->mmap_lock at least held readlocked
675  */
676 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
677 {
678         MA_STATE(mas, &mm->mm_mt, addr, addr);
679
680         return mas_walk(&mas);
681 }
682 EXPORT_SYMBOL(find_vma);
683
684 /*
685  * At least xtensa ends up having protection faults even with no
686  * MMU.. No stack expansion, at least.
687  */
688 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
689                         unsigned long addr, struct pt_regs *regs)
690 {
691         struct vm_area_struct *vma;
692
693         mmap_read_lock(mm);
694         vma = vma_lookup(mm, addr);
695         if (!vma)
696                 mmap_read_unlock(mm);
697         return vma;
698 }
699
700 /*
701  * expand a stack to a given address
702  * - not supported under NOMMU conditions
703  */
704 int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
705 {
706         return -ENOMEM;
707 }
708
709 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
710 {
711         mmap_read_unlock(mm);
712         return NULL;
713 }
714
715 /*
716  * look up the first VMA that exactly matches addr
717  * - should be called with mm->mmap_lock at least held readlocked
718  */
719 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
720                                              unsigned long addr,
721                                              unsigned long len)
722 {
723         struct vm_area_struct *vma;
724         unsigned long end = addr + len;
725         MA_STATE(mas, &mm->mm_mt, addr, addr);
726
727         vma = mas_walk(&mas);
728         if (!vma)
729                 return NULL;
730         if (vma->vm_start != addr)
731                 return NULL;
732         if (vma->vm_end != end)
733                 return NULL;
734
735         return vma;
736 }
737
738 /*
739  * determine whether a mapping should be permitted and, if so, what sort of
740  * mapping we're capable of supporting
741  */
742 static int validate_mmap_request(struct file *file,
743                                  unsigned long addr,
744                                  unsigned long len,
745                                  unsigned long prot,
746                                  unsigned long flags,
747                                  unsigned long pgoff,
748                                  unsigned long *_capabilities)
749 {
750         unsigned long capabilities, rlen;
751         int ret;
752
753         /* do the simple checks first */
754         if (flags & MAP_FIXED)
755                 return -EINVAL;
756
757         if ((flags & MAP_TYPE) != MAP_PRIVATE &&
758             (flags & MAP_TYPE) != MAP_SHARED)
759                 return -EINVAL;
760
761         if (!len)
762                 return -EINVAL;
763
764         /* Careful about overflows.. */
765         rlen = PAGE_ALIGN(len);
766         if (!rlen || rlen > TASK_SIZE)
767                 return -ENOMEM;
768
769         /* offset overflow? */
770         if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
771                 return -EOVERFLOW;
772
773         if (file) {
774                 /* files must support mmap */
775                 if (!file->f_op->mmap)
776                         return -ENODEV;
777
778                 /* work out if what we've got could possibly be shared
779                  * - we support chardevs that provide their own "memory"
780                  * - we support files/blockdevs that are memory backed
781                  */
782                 if (file->f_op->mmap_capabilities) {
783                         capabilities = file->f_op->mmap_capabilities(file);
784                 } else {
785                         /* no explicit capabilities set, so assume some
786                          * defaults */
787                         switch (file_inode(file)->i_mode & S_IFMT) {
788                         case S_IFREG:
789                         case S_IFBLK:
790                                 capabilities = NOMMU_MAP_COPY;
791                                 break;
792
793                         case S_IFCHR:
794                                 capabilities =
795                                         NOMMU_MAP_DIRECT |
796                                         NOMMU_MAP_READ |
797                                         NOMMU_MAP_WRITE;
798                                 break;
799
800                         default:
801                                 return -EINVAL;
802                         }
803                 }
804
805                 /* eliminate any capabilities that we can't support on this
806                  * device */
807                 if (!file->f_op->get_unmapped_area)
808                         capabilities &= ~NOMMU_MAP_DIRECT;
809                 if (!(file->f_mode & FMODE_CAN_READ))
810                         capabilities &= ~NOMMU_MAP_COPY;
811
812                 /* The file shall have been opened with read permission. */
813                 if (!(file->f_mode & FMODE_READ))
814                         return -EACCES;
815
816                 if (flags & MAP_SHARED) {
817                         /* do checks for writing, appending and locking */
818                         if ((prot & PROT_WRITE) &&
819                             !(file->f_mode & FMODE_WRITE))
820                                 return -EACCES;
821
822                         if (IS_APPEND(file_inode(file)) &&
823                             (file->f_mode & FMODE_WRITE))
824                                 return -EACCES;
825
826                         if (!(capabilities & NOMMU_MAP_DIRECT))
827                                 return -ENODEV;
828
829                         /* we mustn't privatise shared mappings */
830                         capabilities &= ~NOMMU_MAP_COPY;
831                 } else {
832                         /* we're going to read the file into private memory we
833                          * allocate */
834                         if (!(capabilities & NOMMU_MAP_COPY))
835                                 return -ENODEV;
836
837                         /* we don't permit a private writable mapping to be
838                          * shared with the backing device */
839                         if (prot & PROT_WRITE)
840                                 capabilities &= ~NOMMU_MAP_DIRECT;
841                 }
842
843                 if (capabilities & NOMMU_MAP_DIRECT) {
844                         if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
845                             ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
846                             ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
847                             ) {
848                                 capabilities &= ~NOMMU_MAP_DIRECT;
849                                 if (flags & MAP_SHARED) {
850                                         pr_warn("MAP_SHARED not completely supported on !MMU\n");
851                                         return -EINVAL;
852                                 }
853                         }
854                 }
855
856                 /* handle executable mappings and implied executable
857                  * mappings */
858                 if (path_noexec(&file->f_path)) {
859                         if (prot & PROT_EXEC)
860                                 return -EPERM;
861                 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
862                         /* handle implication of PROT_EXEC by PROT_READ */
863                         if (current->personality & READ_IMPLIES_EXEC) {
864                                 if (capabilities & NOMMU_MAP_EXEC)
865                                         prot |= PROT_EXEC;
866                         }
867                 } else if ((prot & PROT_READ) &&
868                          (prot & PROT_EXEC) &&
869                          !(capabilities & NOMMU_MAP_EXEC)
870                          ) {
871                         /* backing file is not executable, try to copy */
872                         capabilities &= ~NOMMU_MAP_DIRECT;
873                 }
874         } else {
875                 /* anonymous mappings are always memory backed and can be
876                  * privately mapped
877                  */
878                 capabilities = NOMMU_MAP_COPY;
879
880                 /* handle PROT_EXEC implication by PROT_READ */
881                 if ((prot & PROT_READ) &&
882                     (current->personality & READ_IMPLIES_EXEC))
883                         prot |= PROT_EXEC;
884         }
885
886         /* allow the security API to have its say */
887         ret = security_mmap_addr(addr);
888         if (ret < 0)
889                 return ret;
890
891         /* looks okay */
892         *_capabilities = capabilities;
893         return 0;
894 }
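/*
 * Summary for clarity: on !MMU, mmap(MAP_SHARED, PROT_WRITE) of an ordinary
 * regular file fails with -ENODEV unless the file's mmap_capabilities()
 * include NOMMU_MAP_DIRECT, because a shared mapping cannot be emulated with
 * a private copy; a MAP_PRIVATE mapping of the same file succeeds provided
 * the file can be read, since do_mmap_private() will copy the data into an
 * allocated buffer instead.
 */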
895
896 /*
897  * we've determined that we can make the mapping, now translate what we
898  * now know into VMA flags
899  */
900 static unsigned long determine_vm_flags(struct file *file,
901                                         unsigned long prot,
902                                         unsigned long flags,
903                                         unsigned long capabilities)
904 {
905         unsigned long vm_flags;
906
907         vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
908         /* vm_flags |= mm->def_flags; */
909
910         if (!(capabilities & NOMMU_MAP_DIRECT)) {
911                 /* attempt to share read-only copies of mapped file chunks */
912                 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
913                 if (file && !(prot & PROT_WRITE))
914                         vm_flags |= VM_MAYSHARE;
915         } else {
916                 /* overlay a shareable mapping on the backing device or inode
917                  * if possible - used for chardevs, ramfs/tmpfs/shmfs and
918                  * romfs/cramfs */
919                 vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
920                 if (flags & MAP_SHARED)
921                         vm_flags |= VM_SHARED;
922         }
923
924         /* refuse to let anyone share private mappings with this process if
925          * it's being traced - otherwise breakpoints set in it may interfere
926          * with another untraced process
927          */
928         if ((flags & MAP_PRIVATE) && current->ptrace)
929                 vm_flags &= ~VM_MAYSHARE;
930
931         return vm_flags;
932 }
933
934 /*
935  * set up a shared mapping on a file (the driver or filesystem provides and
936  * pins the storage)
937  */
938 static int do_mmap_shared_file(struct vm_area_struct *vma)
939 {
940         int ret;
941
942         ret = call_mmap(vma->vm_file, vma);
943         if (ret == 0) {
944                 vma->vm_region->vm_top = vma->vm_region->vm_end;
945                 return 0;
946         }
947         if (ret != -ENOSYS)
948                 return ret;
949
950         /* getting -ENOSYS indicates that direct mmap isn't possible (as
951          * opposed to tried but failed) so we can only give a suitable error as
952          * it's not possible to make a private copy if MAP_SHARED was given */
953         return -ENODEV;
954 }
955
956 /*
957  * set up a private mapping or an anonymous shared mapping
958  */
959 static int do_mmap_private(struct vm_area_struct *vma,
960                            struct vm_region *region,
961                            unsigned long len,
962                            unsigned long capabilities)
963 {
964         unsigned long total, point;
965         void *base;
966         int ret, order;
967
968         /* invoke the file's mapping function so that it can keep track of
969          * shared mappings on devices or memory
970          * - VM_MAYSHARE will be set if it may attempt to share
971          */
972         if (capabilities & NOMMU_MAP_DIRECT) {
973                 ret = call_mmap(vma->vm_file, vma);
974                 if (ret == 0) {
975                         /* shouldn't return success if we're not sharing */
976                         BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
977                         vma->vm_region->vm_top = vma->vm_region->vm_end;
978                         return 0;
979                 }
980                 if (ret != -ENOSYS)
981                         return ret;
982
983                 /* getting an ENOSYS error indicates that direct mmap isn't
984                  * possible (as opposed to tried but failed) so we'll try to
985                  * make a private copy of the data and map that instead */
986         }
987
988
989         /* allocate some memory to hold the mapping
990          * - note that this may not return a page-aligned address if the object
991          *   we're allocating is smaller than a page
992          */
993         order = get_order(len);
994         total = 1 << order;
995         point = len >> PAGE_SHIFT;
996
997         /* we don't want to allocate a power-of-2 sized page set */
998         if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
999                 total = point;
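	/*
	 * Worked example: for a 5-page request, get_order() gives order 3,
	 * so "total" starts at 8 pages while "point" is 5.  With the default
	 * sysctl_nr_trim_pages of 1, 8 - 5 >= 1 holds, the request is
	 * trimmed back to exactly 5 pages, and alloc_pages_exact() below
	 * hands the excess pages straight back to the page allocator.
	 */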
1000
1001         base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
1002         if (!base)
1003                 goto enomem;
1004
1005         atomic_long_add(total, &mmap_pages_allocated);
1006
1007         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1008         region->vm_start = (unsigned long) base;
1009         region->vm_end   = region->vm_start + len;
1010         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1011
1012         vma->vm_start = region->vm_start;
1013         vma->vm_end   = region->vm_start + len;
1014
1015         if (vma->vm_file) {
1016                 /* read the contents of a file into the copy */
1017                 loff_t fpos;
1018
1019                 fpos = vma->vm_pgoff;
1020                 fpos <<= PAGE_SHIFT;
1021
1022                 ret = kernel_read(vma->vm_file, base, len, &fpos);
1023                 if (ret < 0)
1024                         goto error_free;
1025
1026                 /* clear the last little bit */
1027                 if (ret < len)
1028                         memset(base + ret, 0, len - ret);
1029
1030         } else {
1031                 vma_set_anonymous(vma);
1032         }
1033
1034         return 0;
1035
1036 error_free:
1037         free_page_series(region->vm_start, region->vm_top);
1038         region->vm_start = vma->vm_start = 0;
1039         region->vm_end   = vma->vm_end = 0;
1040         region->vm_top   = 0;
1041         return ret;
1042
1043 enomem:
1044         pr_err("Allocation of length %lu from process %d (%s) failed\n",
1045                len, current->pid, current->comm);
1046         show_free_areas(0, NULL);
1047         return -ENOMEM;
1048 }
1049
1050 /*
1051  * handle mapping creation for uClinux
1052  */
1053 unsigned long do_mmap(struct file *file,
1054                         unsigned long addr,
1055                         unsigned long len,
1056                         unsigned long prot,
1057                         unsigned long flags,
1058                         unsigned long pgoff,
1059                         unsigned long *populate,
1060                         struct list_head *uf)
1061 {
1062         struct vm_area_struct *vma;
1063         struct vm_region *region;
1064         struct rb_node *rb;
1065         vm_flags_t vm_flags;
1066         unsigned long capabilities, result;
1067         int ret;
1068         MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1069
1070         *populate = 0;
1071
1072         /* decide whether we should attempt the mapping, and if so what sort of
1073          * mapping */
1074         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1075                                     &capabilities);
1076         if (ret < 0)
1077                 return ret;
1078
1079         /* we ignore the address hint */
1080         addr = 0;
1081         len = PAGE_ALIGN(len);
1082
1083         /* we've determined that we can make the mapping, now translate what we
1084          * now know into VMA flags */
1085         vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1086
1087
1088         /* we're going to need to record the mapping */
1089         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1090         if (!region)
1091                 goto error_getting_region;
1092
1093         vma = vm_area_alloc(current->mm);
1094         if (!vma)
1095                 goto error_getting_vma;
1096
1097         if (mas_preallocate(&mas, vma, GFP_KERNEL))
1098                 goto error_maple_preallocate;
1099
1100         region->vm_usage = 1;
1101         region->vm_flags = vm_flags;
1102         region->vm_pgoff = pgoff;
1103
1104         vma->vm_flags = vm_flags;
1105         vma->vm_pgoff = pgoff;
1106
1107         if (file) {
1108                 region->vm_file = get_file(file);
1109                 vma->vm_file = get_file(file);
1110         }
1111
1112         down_write(&nommu_region_sem);
1113
1114         /* if we want to share, we need to check for regions created by other
1115          * mmap() calls that overlap with our proposed mapping
1116          * - we can only share with a superset match on most regular files
1117          * - shared mappings on character devices and memory backed files are
1118          *   permitted to overlap inexactly as far as we are concerned for in
1119          *   these cases, sharing is handled in the driver or filesystem rather
1120          *   than here
1121          */
1122         if (vm_flags & VM_MAYSHARE) {
1123                 struct vm_region *pregion;
1124                 unsigned long pglen, rpglen, pgend, rpgend, start;
1125
1126                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1127                 pgend = pgoff + pglen;
1128
1129                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1130                         pregion = rb_entry(rb, struct vm_region, vm_rb);
1131
1132                         if (!(pregion->vm_flags & VM_MAYSHARE))
1133                                 continue;
1134
1135                         /* search for overlapping mappings on the same file */
1136                         if (file_inode(pregion->vm_file) !=
1137                             file_inode(file))
1138                                 continue;
1139
1140                         if (pregion->vm_pgoff >= pgend)
1141                                 continue;
1142
1143                         rpglen = pregion->vm_end - pregion->vm_start;
1144                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1145                         rpgend = pregion->vm_pgoff + rpglen;
1146                         if (pgoff >= rpgend)
1147                                 continue;
1148
1149                         /* handle inexactly overlapping matches between
1150                          * mappings */
1151                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1152                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1153                                 /* new mapping is not a subset of the region */
1154                                 if (!(capabilities & NOMMU_MAP_DIRECT))
1155                                         goto sharing_violation;
1156                                 continue;
1157                         }
1158
1159                         /* we've found a region we can share */
1160                         pregion->vm_usage++;
1161                         vma->vm_region = pregion;
1162                         start = pregion->vm_start;
1163                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1164                         vma->vm_start = start;
1165                         vma->vm_end = start + len;
1166
1167                         if (pregion->vm_flags & VM_MAPPED_COPY)
1168                                 vma->vm_flags |= VM_MAPPED_COPY;
1169                         else {
1170                                 ret = do_mmap_shared_file(vma);
1171                                 if (ret < 0) {
1172                                         vma->vm_region = NULL;
1173                                         vma->vm_start = 0;
1174                                         vma->vm_end = 0;
1175                                         pregion->vm_usage--;
1176                                         pregion = NULL;
1177                                         goto error_just_free;
1178                                 }
1179                         }
1180                         fput(region->vm_file);
1181                         kmem_cache_free(vm_region_jar, region);
1182                         region = pregion;
1183                         result = start;
1184                         goto share;
1185                 }
1186
1187                 /* obtain the address at which to make a shared mapping
1188                  * - this is the hook for quasi-memory character devices to
1189                  *   tell us the location of a shared mapping
1190                  */
1191                 if (capabilities & NOMMU_MAP_DIRECT) {
1192                         addr = file->f_op->get_unmapped_area(file, addr, len,
1193                                                              pgoff, flags);
1194                         if (IS_ERR_VALUE(addr)) {
1195                                 ret = addr;
1196                                 if (ret != -ENOSYS)
1197                                         goto error_just_free;
1198
1199                                 /* the driver refused to tell us where to site
1200                                  * the mapping so we'll have to attempt to copy
1201                                  * it */
1202                                 ret = -ENODEV;
1203                                 if (!(capabilities & NOMMU_MAP_COPY))
1204                                         goto error_just_free;
1205
1206                                 capabilities &= ~NOMMU_MAP_DIRECT;
1207                         } else {
1208                                 vma->vm_start = region->vm_start = addr;
1209                                 vma->vm_end = region->vm_end = addr + len;
1210                         }
1211                 }
1212         }
1213
1214         vma->vm_region = region;
1215
1216         /* set up the mapping
1217          * - the region is filled in if NOMMU_MAP_DIRECT is still set
1218          */
1219         if (file && vma->vm_flags & VM_SHARED)
1220                 ret = do_mmap_shared_file(vma);
1221         else
1222                 ret = do_mmap_private(vma, region, len, capabilities);
1223         if (ret < 0)
1224                 goto error_just_free;
1225         add_nommu_region(region);
1226
1227         /* clear anonymous mappings that don't ask for uninitialized data */
1228         if (!vma->vm_file &&
1229             (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1230              !(flags & MAP_UNINITIALIZED)))
1231                 memset((void *)region->vm_start, 0,
1232                        region->vm_end - region->vm_start);
1233
1234         /* okay... we have a mapping; now we have to register it */
1235         result = vma->vm_start;
1236
1237         current->mm->total_vm += len >> PAGE_SHIFT;
1238
1239 share:
1240         mas_add_vma_to_mm(&mas, current->mm, vma);
1241
1242         /* we flush the region from the icache only when the first executable
1243          * mapping of it is made  */
1244         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1245                 flush_icache_user_range(region->vm_start, region->vm_end);
1246                 region->vm_icache_flushed = true;
1247         }
1248
1249         up_write(&nommu_region_sem);
1250
1251         return result;
1252
1253 error_just_free:
1254         up_write(&nommu_region_sem);
1255 error:
1256         mas_destroy(&mas);
1257         if (region->vm_file)
1258                 fput(region->vm_file);
1259         kmem_cache_free(vm_region_jar, region);
1260         if (vma->vm_file)
1261                 fput(vma->vm_file);
1262         vm_area_free(vma);
1263         return ret;
1264
1265 sharing_violation:
1266         up_write(&nommu_region_sem);
1267         pr_warn("Attempt to share mismatched mappings\n");
1268         ret = -EINVAL;
1269         goto error;
1270
1271 error_getting_vma:
1272         kmem_cache_free(vm_region_jar, region);
1273         pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1274                         len, current->pid);
1275         show_free_areas(0, NULL);
1276         return -ENOMEM;
1277
1278 error_getting_region:
1279         pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1280                         len, current->pid);
1281         show_free_areas(0, NULL);
1282         return -ENOMEM;
1283
1284 error_maple_preallocate:
1285         kmem_cache_free(vm_region_jar, region);
1286         vm_area_free(vma);
1287         pr_warn("Allocation of vma tree for process %d failed\n", current->pid);
1288         show_free_areas(0, NULL);
1289         return -ENOMEM;
1290
1291 }
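/*
 * Illustrative user-space view of the sharing rules above: two MAP_SHARED
 * mappings of the same ramfs/tmpfs file with the same page offset and length
 * end up sharing one vm_region, so both mmap() calls return the same address
 * and stores through one mapping are visible through the other.  A second
 * shared mapping that is neither a subset of the existing region nor
 * directly mappable is rejected with -EINVAL via the sharing_violation path.
 */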
1292
1293 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1294                               unsigned long prot, unsigned long flags,
1295                               unsigned long fd, unsigned long pgoff)
1296 {
1297         struct file *file = NULL;
1298         unsigned long retval = -EBADF;
1299
1300         audit_mmap_fd(fd, flags);
1301         if (!(flags & MAP_ANONYMOUS)) {
1302                 file = fget(fd);
1303                 if (!file)
1304                         goto out;
1305         }
1306
1307         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1308
1309         if (file)
1310                 fput(file);
1311 out:
1312         return retval;
1313 }
1314
1315 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1316                 unsigned long, prot, unsigned long, flags,
1317                 unsigned long, fd, unsigned long, pgoff)
1318 {
1319         return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1320 }
1321
1322 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1323 struct mmap_arg_struct {
1324         unsigned long addr;
1325         unsigned long len;
1326         unsigned long prot;
1327         unsigned long flags;
1328         unsigned long fd;
1329         unsigned long offset;
1330 };
1331
1332 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1333 {
1334         struct mmap_arg_struct a;
1335
1336         if (copy_from_user(&a, arg, sizeof(a)))
1337                 return -EFAULT;
1338         if (offset_in_page(a.offset))
1339                 return -EINVAL;
1340
1341         return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1342                                a.offset >> PAGE_SHIFT);
1343 }
1344 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1345
1346 /*
1347  * split a vma into two pieces at address 'addr'; a new vma is allocated either
1348  * for the first part or the tail.
1349  */
1350 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1351               unsigned long addr, int new_below)
1352 {
1353         struct vm_area_struct *new;
1354         struct vm_region *region;
1355         unsigned long npages;
1356         MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
1357
1358         /* we're only permitted to split anonymous regions (these should have
1359          * only a single usage on the region) */
1360         if (vma->vm_file)
1361                 return -ENOMEM;
1362
1363         mm = vma->vm_mm;
1364         if (mm->map_count >= sysctl_max_map_count)
1365                 return -ENOMEM;
1366
1367         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1368         if (!region)
1369                 return -ENOMEM;
1370
1371         new = vm_area_dup(vma);
1372         if (!new)
1373                 goto err_vma_dup;
1374
1375         if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
1376                 pr_warn("Allocation of vma tree for process %d failed\n",
1377                         current->pid);
1378                 goto err_mas_preallocate;
1379         }
1380
1381         /* most fields are the same, copy all, and then fixup */
1382         *region = *vma->vm_region;
1383         new->vm_region = region;
1384
1385         npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1386
1387         if (new_below) {
1388                 region->vm_top = region->vm_end = new->vm_end = addr;
1389         } else {
1390                 region->vm_start = new->vm_start = addr;
1391                 region->vm_pgoff = new->vm_pgoff += npages;
1392         }
1393
1394         if (new->vm_ops && new->vm_ops->open)
1395                 new->vm_ops->open(new);
1396
1397         down_write(&nommu_region_sem);
1398         delete_nommu_region(vma->vm_region);
1399         if (new_below) {
1400                 vma->vm_region->vm_start = vma->vm_start = addr;
1401                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1402         } else {
1403                 vma->vm_region->vm_end = vma->vm_end = addr;
1404                 vma->vm_region->vm_top = addr;
1405         }
1406         add_nommu_region(vma->vm_region);
1407         add_nommu_region(new->vm_region);
1408         up_write(&nommu_region_sem);
1409
1410         setup_vma_to_mm(vma, mm);
1411         setup_vma_to_mm(new, mm);
1412         mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
1413         mas_store(&mas, vma);
1414         vma_mas_store(new, &mas);
1415         mm->map_count++;
1416         return 0;
1417
1418 err_mas_preallocate:
1419         vm_area_free(new);
1420 err_vma_dup:
1421         kmem_cache_free(vm_region_jar, region);
1422         return -ENOMEM;
1423 }
1424
1425 /*
1426  * shrink a VMA by removing the specified chunk from either the beginning or
1427  * the end
1428  */
1429 static int shrink_vma(struct mm_struct *mm,
1430                       struct vm_area_struct *vma,
1431                       unsigned long from, unsigned long to)
1432 {
1433         struct vm_region *region;
1434
1435         /* adjust the VMA's pointers, which may reposition it in the MM's tree
1436          * and list */
1437         if (delete_vma_from_mm(vma))
1438                 return -ENOMEM;
1439         if (from > vma->vm_start)
1440                 vma->vm_end = from;
1441         else
1442                 vma->vm_start = to;
1443         if (add_vma_to_mm(mm, vma))
1444                 return -ENOMEM;
1445
1446         /* cut the backing region down to size */
1447         region = vma->vm_region;
1448         BUG_ON(region->vm_usage != 1);
1449
1450         down_write(&nommu_region_sem);
1451         delete_nommu_region(region);
1452         if (from > region->vm_start) {
1453                 to = region->vm_top;
1454                 region->vm_top = region->vm_end = from;
1455         } else {
1456                 region->vm_start = to;
1457         }
1458         add_nommu_region(region);
1459         up_write(&nommu_region_sem);
1460
1461         free_page_series(from, to);
1462         return 0;
1463 }
1464
1465 /*
1466  * release a mapping
1467  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1468  *   VMA, though it need not cover the whole VMA
1469  */
1470 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1471 {
1472         MA_STATE(mas, &mm->mm_mt, start, start);
1473         struct vm_area_struct *vma;
1474         unsigned long end;
1475         int ret = 0;
1476
1477         len = PAGE_ALIGN(len);
1478         if (len == 0)
1479                 return -EINVAL;
1480
1481         end = start + len;
1482
1483         /* find the first potentially overlapping VMA */
1484         vma = mas_find(&mas, end - 1);
1485         if (!vma) {
1486                 static int limit;
1487                 if (limit < 5) {
1488                         pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1489                                         current->pid, current->comm,
1490                                         start, start + len - 1);
1491                         limit++;
1492                 }
1493                 return -EINVAL;
1494         }
1495
1496         /* we're allowed to split an anonymous VMA but not a file-backed one */
1497         if (vma->vm_file) {
1498                 do {
1499                         if (start > vma->vm_start)
1500                                 return -EINVAL;
1501                         if (end == vma->vm_end)
1502                                 goto erase_whole_vma;
1503                         vma = mas_next(&mas, end - 1);
1504                 } while (vma);
1505                 return -EINVAL;
1506         } else {
1507                 /* the chunk must be a subset of the VMA found */
1508                 if (start == vma->vm_start && end == vma->vm_end)
1509                         goto erase_whole_vma;
1510                 if (start < vma->vm_start || end > vma->vm_end)
1511                         return -EINVAL;
1512                 if (offset_in_page(start))
1513                         return -EINVAL;
1514                 if (end != vma->vm_end && offset_in_page(end))
1515                         return -EINVAL;
1516                 if (start != vma->vm_start && end != vma->vm_end) {
1517                         ret = split_vma(mm, vma, start, 1);
1518                         if (ret < 0)
1519                                 return ret;
1520                 }
1521                 return shrink_vma(mm, vma, start, end);
1522         }
1523
1524 erase_whole_vma:
1525         if (delete_vma_from_mm(vma))
1526                 ret = -ENOMEM;
1527         else
1528                 delete_vma(mm, vma);
1529         return ret;
1530 }
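/*
 * Summary for clarity: on !MMU the range passed to munmap() must be backed
 * by a single VMA.  Unmapping a whole mapping always works; unmapping the
 * start, middle or tail of an anonymous mapping shrinks or splits it; a
 * partial unmap of a file-backed mapping is rejected with -EINVAL.
 */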
1531
1532 int vm_munmap(unsigned long addr, size_t len)
1533 {
1534         struct mm_struct *mm = current->mm;
1535         int ret;
1536
1537         mmap_write_lock(mm);
1538         ret = do_munmap(mm, addr, len, NULL);
1539         mmap_write_unlock(mm);
1540         return ret;
1541 }
1542 EXPORT_SYMBOL(vm_munmap);
1543
1544 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1545 {
1546         return vm_munmap(addr, len);
1547 }
1548
1549 /*
1550  * release all the mappings made in a process's VM space
1551  */
1552 void exit_mmap(struct mm_struct *mm)
1553 {
1554         VMA_ITERATOR(vmi, mm, 0);
1555         struct vm_area_struct *vma;
1556
1557         if (!mm)
1558                 return;
1559
1560         mm->total_vm = 0;
1561
1562         /*
1563          * Lock the mm to avoid assert complaining even though this is the only
1564          * user of the mm
1565          */
1566         mmap_write_lock(mm);
1567         for_each_vma(vmi, vma) {
1568                 cleanup_vma_from_mm(vma);
1569                 delete_vma(mm, vma);
1570                 cond_resched();
1571         }
1572         __mt_destroy(&mm->mm_mt);
1573         mmap_write_unlock(mm);
1574 }
1575
1576 int vm_brk(unsigned long addr, unsigned long len)
1577 {
1578         return -ENOMEM;
1579 }
1580
1581 /*
1582  * expand (or shrink) an existing mapping, potentially moving it at the same
1583  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1584  *
1585  * under NOMMU conditions, we only permit changing a mapping's size, and only
1586  * as long as it stays within the region allocated by do_mmap_private() and the
1587  * block is not shareable
1588  *
1589  * MREMAP_FIXED is not supported under NOMMU conditions
1590  */
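/*
 * A rough userspace sketch of what is and is not permitted (illustrative
 * only, assuming a 4KiB page size and that do_mmap_private() backed the
 * 8KiB request with an 8KiB region - the real outcome depends on how much
 * backing store was actually allocated):
 *
 *	addr = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mremap(addr, 8192, 4096, 0);		   shrink in place - OK
 *	mremap(addr, 4096, 65536, MREMAP_MAYMOVE); won't fit in the backing
 *						   region and the block can't
 *						   be moved - fails
 */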
1591 static unsigned long do_mremap(unsigned long addr,
1592                         unsigned long old_len, unsigned long new_len,
1593                         unsigned long flags, unsigned long new_addr)
1594 {
1595         struct vm_area_struct *vma;
1596
1597         /* insanity checks first */
1598         old_len = PAGE_ALIGN(old_len);
1599         new_len = PAGE_ALIGN(new_len);
1600         if (old_len == 0 || new_len == 0)
1601                 return (unsigned long) -EINVAL;
1602
1603         if (offset_in_page(addr))
1604                 return (unsigned long) -EINVAL;
1605
1606         if (flags & MREMAP_FIXED && new_addr != addr)
1607                 return (unsigned long) -EINVAL;
1608
1609         vma = find_vma_exact(current->mm, addr, old_len);
1610         if (!vma)
1611                 return (unsigned long) -EINVAL;
1612
1613         if (vma->vm_end != vma->vm_start + old_len)
1614                 return (unsigned long) -EFAULT;
1615
1616         if (vma->vm_flags & VM_MAYSHARE)
1617                 return (unsigned long) -EPERM;
1618
1619         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1620                 return (unsigned long) -ENOMEM;
1621
1622         /* all checks complete - do it */
1623         vma->vm_end = vma->vm_start + new_len;
1624         return vma->vm_start;
1625 }
1626
1627 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1628                 unsigned long, new_len, unsigned long, flags,
1629                 unsigned long, new_addr)
1630 {
1631         unsigned long ret;
1632
1633         mmap_write_lock(current->mm);
1634         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1635         mmap_write_unlock(current->mm);
1636         return ret;
1637 }
1638
1639 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1640                          unsigned int foll_flags)
1641 {
1642         return NULL;
1643 }
1644
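/*
 * With no MMU there is nothing to remap: the call can only succeed if the
 * requested range is already an identity mapping of the pfn range, in which
 * case all that is needed is to mark the VMA accordingly.
 */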
1645 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1646                 unsigned long pfn, unsigned long size, pgprot_t prot)
1647 {
1648         if (addr != (pfn << PAGE_SHIFT))
1649                 return -EINVAL;
1650
1651         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1652         return 0;
1653 }
1654 EXPORT_SYMBOL(remap_pfn_range);
1655
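/*
 * Map the whole of the VMA onto the physical range starting at @start,
 * taking vm_pgoff into account, by way of io_remap_pfn_range().
 */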
1656 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1657 {
1658         unsigned long pfn = start >> PAGE_SHIFT;
1659         unsigned long vm_len = vma->vm_end - vma->vm_start;
1660
1661         pfn += vma->vm_pgoff;
1662         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1663 }
1664 EXPORT_SYMBOL(vm_iomap_memory);
1665
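/*
 * Under NOMMU, vmalloc memory is directly addressable, so remapping it into
 * userspace just means pointing the VMA at the buffer.  The mapping must
 * have been created with VM_USERMAP for this to be permitted.
 */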
1666 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1667                         unsigned long pgoff)
1668 {
1669         unsigned int size = vma->vm_end - vma->vm_start;
1670
1671         if (!(vma->vm_flags & VM_USERMAP))
1672                 return -EINVAL;
1673
1674         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1675         vma->vm_end = vma->vm_start + size;
1676
1677         return 0;
1678 }
1679 EXPORT_SYMBOL(remap_vmalloc_range);
1680
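/*
 * The fault handlers below should never be called under NOMMU: file
 * mappings are set up in full at mmap time and no page faults are taken.
 */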
1681 vm_fault_t filemap_fault(struct vm_fault *vmf)
1682 {
1683         BUG();
1684         return 0;
1685 }
1686 EXPORT_SYMBOL(filemap_fault);
1687
1688 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1689                 pgoff_t start_pgoff, pgoff_t end_pgoff)
1690 {
1691         BUG();
1692         return 0;
1693 }
1694 EXPORT_SYMBOL(filemap_map_pages);
1695
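/*
 * Copy data into or out of another mm's address space.  The access is
 * clamped to the VMA containing @addr and is only performed if that VMA
 * permits it (VM_MAYWRITE for writes, VM_MAYREAD for reads).  Returns the
 * number of bytes copied, or 0 if nothing could be accessed.
 */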
1696 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
1697                        int len, unsigned int gup_flags)
1698 {
1699         struct vm_area_struct *vma;
1700         int write = gup_flags & FOLL_WRITE;
1701
1702         if (mmap_read_lock_killable(mm))
1703                 return 0;
1704
1705         /* the access must start within one of the target process's mappings */
1706         vma = find_vma(mm, addr);
1707         if (vma) {
1708                 /* don't overrun this mapping */
1709                 if (addr + len >= vma->vm_end)
1710                         len = vma->vm_end - addr;
1711
1712                 /* only read or write mappings where it is permitted */
1713                 if (write && vma->vm_flags & VM_MAYWRITE)
1714                         copy_to_user_page(vma, NULL, addr,
1715                                          (void *) addr, buf, len);
1716                 else if (!write && vma->vm_flags & VM_MAYREAD)
1717                         copy_from_user_page(vma, NULL, addr,
1718                                             buf, (void *) addr, len);
1719                 else
1720                         len = 0;
1721         } else {
1722                 len = 0;
1723         }
1724
1725         mmap_read_unlock(mm);
1726
1727         return len;
1728 }
1729
1730 /**
1731  * access_remote_vm - access another process' address space
1732  * @mm:         the mm_struct of the target address space
1733  * @addr:       start address to access
1734  * @buf:        source or destination buffer
1735  * @len:        number of bytes to transfer
1736  * @gup_flags:  flags modifying lookup behaviour
1737  *
1738  * The caller must hold a reference on @mm.
1739  */
1740 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1741                 void *buf, int len, unsigned int gup_flags)
1742 {
1743         return __access_remote_vm(mm, addr, buf, len, gup_flags);
1744 }
1745
1746 /*
1747  * Access another process' address space.
1748  * - source/target buffer must be kernel space
1749  */
1750 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1751                 unsigned int gup_flags)
1752 {
1753         struct mm_struct *mm;
1754
1755         if (addr + len < addr)
1756                 return 0;
1757
1758         mm = get_task_mm(tsk);
1759         if (!mm)
1760                 return 0;
1761
1762         len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1763
1764         mmput(mm);
1765         return len;
1766 }
1767 EXPORT_SYMBOL_GPL(access_process_vm);
1768
1769 /**
1770  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1771  * @inode: The inode to check
1772  * @size: The current filesize of the inode
1773  * @newsize: The proposed filesize of the inode
1774  *
1775  * Check the shared mappings on an inode on behalf of a shrinking truncate to
1776  * make sure that any outstanding VMAs aren't broken and then shrink the
1777  * vm_regions that extend beyond the new size so that do_mmap() doesn't
1778  * automatically grant mappings that are too large.
1779  */
1780 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1781                                 size_t newsize)
1782 {
1783         struct vm_area_struct *vma;
1784         struct vm_region *region;
1785         pgoff_t low, high;
1786         size_t r_size, r_top;
1787
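        /* work out the page range that the truncation will cut off (the "dead zone") */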
1788         low = newsize >> PAGE_SHIFT;
1789         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1790
1791         down_write(&nommu_region_sem);
1792         i_mmap_lock_read(inode->i_mapping);
1793
1794         /* search for VMAs that fall within the dead zone */
1795         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1796                 /* found one - only interested if it's shared out of the page
1797                  * cache */
1798                 if (vma->vm_flags & VM_SHARED) {
1799                         i_mmap_unlock_read(inode->i_mapping);
1800                         up_write(&nommu_region_sem);
1801                         return -ETXTBSY; /* not quite true, but near enough */
1802                 }
1803         }
1804
1805         /* reduce any regions that overlap the dead zone - if they exist,
1806          * these will be pointed to by VMAs that don't overlap the dead zone
1807          *
1808          * we don't check for any regions that start beyond the EOF as there
1809          * shouldn't be any
1810          */
1811         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1812                 if (!(vma->vm_flags & VM_SHARED))
1813                         continue;
1814
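                /* work out the file offset covered by the end of this region */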
1815                 region = vma->vm_region;
1816                 r_size = region->vm_top - region->vm_start;
1817                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1818
1819                 if (r_top > newsize) {
1820                         region->vm_top -= r_top - newsize;
1821                         if (region->vm_end > region->vm_top)
1822                                 region->vm_end = region->vm_top;
1823                 }
1824         }
1825
1826         i_mmap_unlock_read(inode->i_mapping);
1827         up_write(&nommu_region_sem);
1828         return 0;
1829 }
1830
1831 /*
1832  * Initialise sysctl_user_reserve_kbytes.
1833  *
1834  * This is intended to prevent a user from starting a single memory-hogging
1835  * process that leaves them unable to recover (kill the hog) in
1836  * OVERCOMMIT_NEVER mode.
1837  *
1838  * The default value is min(3% of free memory, 128MB)
1839  * 128MB is enough to recover with sshd/login, bash, and top/kill.
1840  */
1841 static int __meminit init_user_reserve(void)
1842 {
1843         unsigned long free_kbytes;
1844
1845         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1846
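        /* 1/32 of free memory is roughly 3%; 1UL << 17 kilobytes is 128MB */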
1847         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1848         return 0;
1849 }
1850 subsys_initcall(init_user_reserve);
1851
1852 /*
1853  * Initialise sysctl_admin_reserve_kbytes.
1854  *
1855  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1856  * to log in and kill a memory hogging process.
1857  *
1858  * Systems with more than 256MB will reserve 8MB, enough to recover
1859  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1860  * only reserve 3% of free pages by default.
1861  */
1862 static int __meminit init_admin_reserve(void)
1863 {
1864         unsigned long free_kbytes;
1865
1866         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1867
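        /* 1/32 of free memory is roughly 3%; 1UL << 13 kilobytes is 8MB */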
1868         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1869         return 0;
1870 }
1871 subsys_initcall(init_admin_reserve);