1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/nommu.c
4  *
5  *  Replacement code for mm functions to support CPUs that don't
6  *  have any form of memory management unit (thus no virtual memory).
7  *
8  *  See Documentation/admin-guide/mm/nommu-mmap.rst
9  *
10  *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
11  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
12  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
13  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
14  *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/export.h>
20 #include <linux/mm.h>
21 #include <linux/sched/mm.h>
22 #include <linux/mman.h>
23 #include <linux/swap.h>
24 #include <linux/file.h>
25 #include <linux/highmem.h>
26 #include <linux/pagemap.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
29 #include <linux/backing-dev.h>
30 #include <linux/compiler.h>
31 #include <linux/mount.h>
32 #include <linux/personality.h>
33 #include <linux/security.h>
34 #include <linux/syscalls.h>
35 #include <linux/audit.h>
36 #include <linux/printk.h>
37
38 #include <linux/uaccess.h>
39 #include <asm/tlb.h>
40 #include <asm/tlbflush.h>
41 #include <asm/mmu_context.h>
42 #include "internal.h"
43
44 void *high_memory;
45 EXPORT_SYMBOL(high_memory);
46 struct page *mem_map;
47 unsigned long max_mapnr;
48 EXPORT_SYMBOL(max_mapnr);
49 unsigned long highest_memmap_pfn;
50 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
51 int heap_stack_gap = 0;
52
53 atomic_long_t mmap_pages_allocated;
54
55 EXPORT_SYMBOL(mem_map);
56
57 /* list of mapped, potentially shareable regions */
58 static struct kmem_cache *vm_region_jar;
59 struct rb_root nommu_region_tree = RB_ROOT;
60 DECLARE_RWSEM(nommu_region_sem);
61
62 const struct vm_operations_struct generic_file_vm_ops = {
63 };
64
65 /*
66  * Return the total memory allocated for this pointer, not
67  * just what the caller asked for.
68  *
69  * Doesn't have to be accurate, i.e. may have races.
70  */
71 unsigned int kobjsize(const void *objp)
72 {
73         struct page *page;
74
75         /*
76          * If the object we have should not have ksize performed on it,
77          * return size of 0
78          */
79         if (!objp || !virt_addr_valid(objp))
80                 return 0;
81
82         page = virt_to_head_page(objp);
83
84         /*
85          * If the allocator sets PageSlab, we know the pointer came from
86          * kmalloc().
87          */
88         if (PageSlab(page))
89                 return ksize(objp);
90
91         /*
92          * If it's not a compound page, see if we have a matching VMA
93          * region. This test is intentionally done in reverse order,
94          * so if there's no VMA, we still fall through and hand back
95          * PAGE_SIZE for 0-order pages.
96          */
97         if (!PageCompound(page)) {
98                 struct vm_area_struct *vma;
99
100                 vma = find_vma(current->mm, (unsigned long)objp);
101                 if (vma)
102                         return vma->vm_end - vma->vm_start;
103         }
104
105         /*
106          * The ksize() function is only guaranteed to work for pointers
107          * returned by kmalloc(). So handle arbitrary pointers here.
108          */
109         return page_size(page);
110 }
111
112 /**
113  * follow_pfn - look up PFN at a user virtual address
114  * @vma: memory mapping
115  * @address: user virtual address
116  * @pfn: location to store found PFN
117  *
118  * Only IO mappings and raw PFN mappings are allowed.
119  *
120  * Returns zero and the pfn at @pfn on success, -ve otherwise.
121  */
122 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
123         unsigned long *pfn)
124 {
125         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
126                 return -EINVAL;
127
128         *pfn = address >> PAGE_SHIFT;
129         return 0;
130 }
131 EXPORT_SYMBOL(follow_pfn);
132
133 LIST_HEAD(vmap_area_list);
134
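/*
 * On !MMU, __vmalloc() below hands out kmalloc() memory, so freeing a
 * "vmalloc" allocation is just a kfree().
 */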
135 void vfree(const void *addr)
136 {
137         kfree(addr);
138 }
139 EXPORT_SYMBOL(vfree);
140
141 void *__vmalloc(unsigned long size, gfp_t gfp_mask)
142 {
143         /*
144          *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
145          * returns only a logical address.
146          */
147         return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
148 }
149 EXPORT_SYMBOL(__vmalloc);
150
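/*
 * Without an MMU the placement range, alignment, protection and node hints
 * cannot be honoured, so these variants simply fall back to __vmalloc().
 */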
151 void *__vmalloc_node_range(unsigned long size, unsigned long align,
152                 unsigned long start, unsigned long end, gfp_t gfp_mask,
153                 pgprot_t prot, unsigned long vm_flags, int node,
154                 const void *caller)
155 {
156         return __vmalloc(size, gfp_mask);
157 }
158
159 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
160                 int node, const void *caller)
161 {
162         return __vmalloc(size, gfp_mask);
163 }
164
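/*
 * Allocate a buffer suitable for mapping into userspace and mark the VMA
 * covering it with VM_USERMAP so that remap_vmalloc_range() will accept it.
 */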
165 static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
166 {
167         void *ret;
168
169         ret = __vmalloc(size, flags);
170         if (ret) {
171                 struct vm_area_struct *vma;
172
173                 mmap_write_lock(current->mm);
174                 vma = find_vma(current->mm, (unsigned long)ret);
175                 if (vma)
176                         vma->vm_flags |= VM_USERMAP;
177                 mmap_write_unlock(current->mm);
178         }
179
180         return ret;
181 }
182
183 void *vmalloc_user(unsigned long size)
184 {
185         return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
186 }
187 EXPORT_SYMBOL(vmalloc_user);
188
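/*
 * With no separate vmalloc address space, a "vmalloc" address is an ordinary
 * kernel virtual address, so the page and PFN can be looked up directly.
 */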
189 struct page *vmalloc_to_page(const void *addr)
190 {
191         return virt_to_page(addr);
192 }
193 EXPORT_SYMBOL(vmalloc_to_page);
194
195 unsigned long vmalloc_to_pfn(const void *addr)
196 {
197         return page_to_pfn(virt_to_page(addr));
198 }
199 EXPORT_SYMBOL(vmalloc_to_pfn);
200
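/*
 * Read out of a "vmalloc" area: clamp the length so the copy cannot wrap the
 * address space, then copy directly, since the memory is always mapped.
 */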
201 long vread(char *buf, char *addr, unsigned long count)
202 {
203         /* Don't allow overflow */
204         if ((unsigned long) buf + count < count)
205                 count = -(unsigned long) buf;
206
207         memcpy(buf, addr, count);
208         return count;
209 }
210
211 /*
212  *      vmalloc  -  allocate virtually contiguous memory
213  *
214  *      @size:          allocation size
215  *
216  *      Allocate enough pages to cover @size from the page level
217  *      allocator and map them into contiguous kernel virtual space.
218  *
219  *      For tight control over page level allocator and protection flags
220  *      use __vmalloc() instead.
221  */
222 void *vmalloc(unsigned long size)
223 {
224         return __vmalloc(size, GFP_KERNEL);
225 }
226 EXPORT_SYMBOL(vmalloc);
227
228 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
229
230 /*
231  *      vzalloc - allocate virtually contiguous memory with zero fill
232  *
233  *      @size:          allocation size
234  *
235  *      Allocate enough pages to cover @size from the page level
236  *      allocator and map them into contiguous kernel virtual space.
237  *      The memory allocated is set to zero.
238  *
239  *      For tight control over page level allocator and protection flags
240  *      use __vmalloc() instead.
241  */
242 void *vzalloc(unsigned long size)
243 {
244         return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
245 }
246 EXPORT_SYMBOL(vzalloc);
247
248 /**
249  * vmalloc_node - allocate memory on a specific node
250  * @size:       allocation size
251  * @node:       numa node
252  *
253  * Allocate enough pages to cover @size from the page level
254  * allocator and map them into contiguous kernel virtual space.
255  *
256  * For tight control over page level allocator and protection flags
257  * use __vmalloc() instead.
258  */
259 void *vmalloc_node(unsigned long size, int node)
260 {
261         return vmalloc(size);
262 }
263 EXPORT_SYMBOL(vmalloc_node);
264
265 /**
266  * vzalloc_node - allocate memory on a specific node with zero fill
267  * @size:       allocation size
268  * @node:       numa node
269  *
270  * Allocate enough pages to cover @size from the page level
271  * allocator and map them into contiguous kernel virtual space.
272  * The memory allocated is set to zero.
273  *
274  * For tight control over page level allocator and protection flags
275  * use __vmalloc() instead.
276  */
277 void *vzalloc_node(unsigned long size, int node)
278 {
279         return vzalloc(size);
280 }
281 EXPORT_SYMBOL(vzalloc_node);
282
283 /**
284  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
285  *      @size:          allocation size
286  *
287  *      Allocate enough 32bit PA addressable pages to cover @size from the
288  *      page level allocator and map them into contiguous kernel virtual space.
289  */
290 void *vmalloc_32(unsigned long size)
291 {
292         return __vmalloc(size, GFP_KERNEL);
293 }
294 EXPORT_SYMBOL(vmalloc_32);
295
296 /**
297  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
298  *      @size:          allocation size
299  *
300  * The resulting memory area is 32bit addressable and zeroed so it can be
301  * mapped to userspace without leaking data.
302  *
303  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
304  * remap_vmalloc_range() are permissible.
305  */
306 void *vmalloc_32_user(unsigned long size)
307 {
308         /*
309          * We'll have to sort out the ZONE_DMA bits for 64-bit,
310          * but for now this can simply use vmalloc_user() directly.
311          */
312         return vmalloc_user(size);
313 }
314 EXPORT_SYMBOL(vmalloc_32_user);
315
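/*
 * Mapping arbitrary arrays of pages into a contiguous range needs an MMU,
 * so the vmap()/vm_map_ram() family cannot work here; these stubs BUG() if
 * called (vm_unmap_aliases() is simply a no-op).
 */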
316 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
317 {
318         BUG();
319         return NULL;
320 }
321 EXPORT_SYMBOL(vmap);
322
323 void vunmap(const void *addr)
324 {
325         BUG();
326 }
327 EXPORT_SYMBOL(vunmap);
328
329 void *vm_map_ram(struct page **pages, unsigned int count, int node)
330 {
331         BUG();
332         return NULL;
333 }
334 EXPORT_SYMBOL(vm_map_ram);
335
336 void vm_unmap_ram(const void *mem, unsigned int count)
337 {
338         BUG();
339 }
340 EXPORT_SYMBOL(vm_unmap_ram);
341
342 void vm_unmap_aliases(void)
343 {
344 }
345 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
346
347 void free_vm_area(struct vm_struct *area)
348 {
349         BUG();
350 }
351 EXPORT_SYMBOL_GPL(free_vm_area);
352
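/*
 * Individual pages cannot be inserted into a userspace VMA without page
 * tables, so these always fail with -EINVAL.
 */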
353 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
354                    struct page *page)
355 {
356         return -EINVAL;
357 }
358 EXPORT_SYMBOL(vm_insert_page);
359
360 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
361                         unsigned long num)
362 {
363         return -EINVAL;
364 }
365 EXPORT_SYMBOL(vm_map_pages);
366
367 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
368                                 unsigned long num)
369 {
370         return -EINVAL;
371 }
372 EXPORT_SYMBOL(vm_map_pages_zero);
373
374 /*
375  *  sys_brk() for the most part doesn't need the global kernel
376  *  lock, except when an application is doing something nasty
377  *  like trying to un-brk an area that has already been mapped
378  *  to a regular file.  In this case, the unmapping will need
379  *  to invoke file system routines that need the global lock.
380  */
381 SYSCALL_DEFINE1(brk, unsigned long, brk)
382 {
383         struct mm_struct *mm = current->mm;
384
385         if (brk < mm->start_brk || brk > mm->context.end_brk)
386                 return mm->brk;
387
388         if (mm->brk == brk)
389                 return mm->brk;
390
391         /*
392          * Always allow shrinking brk
393          */
394         if (brk <= mm->brk) {
395                 mm->brk = brk;
396                 return brk;
397         }
398
399         /*
400          * Ok, looks good - let it rip.
401          */
402         flush_icache_user_range(mm->brk, brk);
403         return mm->brk = brk;
404 }
405
406 /*
407  * initialise the percpu counter for VM and the region record slab cache
408  */
409 void __init mmap_init(void)
410 {
411         int ret;
412
413         ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
414         VM_BUG_ON(ret);
415         vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
416 }
417
418 /*
419  * validate the region tree
420  * - the caller must hold the region lock
421  */
422 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
423 static noinline void validate_nommu_regions(void)
424 {
425         struct vm_region *region, *last;
426         struct rb_node *p, *lastp;
427
428         lastp = rb_first(&nommu_region_tree);
429         if (!lastp)
430                 return;
431
432         last = rb_entry(lastp, struct vm_region, vm_rb);
433         BUG_ON(last->vm_end <= last->vm_start);
434         BUG_ON(last->vm_top < last->vm_end);
435
436         while ((p = rb_next(lastp))) {
437                 region = rb_entry(p, struct vm_region, vm_rb);
438                 last = rb_entry(lastp, struct vm_region, vm_rb);
439
440                 BUG_ON(region->vm_end <= region->vm_start);
441                 BUG_ON(region->vm_top < region->vm_end);
442                 BUG_ON(region->vm_start < last->vm_top);
443
444                 lastp = p;
445         }
446 }
447 #else
448 static void validate_nommu_regions(void)
449 {
450 }
451 #endif
452
453 /*
454  * add a region into the global tree
455  */
456 static void add_nommu_region(struct vm_region *region)
457 {
458         struct vm_region *pregion;
459         struct rb_node **p, *parent;
460
461         validate_nommu_regions();
462
463         parent = NULL;
464         p = &nommu_region_tree.rb_node;
465         while (*p) {
466                 parent = *p;
467                 pregion = rb_entry(parent, struct vm_region, vm_rb);
468                 if (region->vm_start < pregion->vm_start)
469                         p = &(*p)->rb_left;
470                 else if (region->vm_start > pregion->vm_start)
471                         p = &(*p)->rb_right;
472                 else if (pregion == region)
473                         return;
474                 else
475                         BUG();
476         }
477
478         rb_link_node(&region->vm_rb, parent, p);
479         rb_insert_color(&region->vm_rb, &nommu_region_tree);
480
481         validate_nommu_regions();
482 }
483
484 /*
485  * delete a region from the global tree
486  */
487 static void delete_nommu_region(struct vm_region *region)
488 {
489         BUG_ON(!nommu_region_tree.rb_node);
490
491         validate_nommu_regions();
492         rb_erase(&region->vm_rb, &nommu_region_tree);
493         validate_nommu_regions();
494 }
495
496 /*
497  * free a contiguous series of pages
498  */
499 static void free_page_series(unsigned long from, unsigned long to)
500 {
501         for (; from < to; from += PAGE_SIZE) {
502                 struct page *page = virt_to_page((void *)from);
503
504                 atomic_long_dec(&mmap_pages_allocated);
505                 put_page(page);
506         }
507 }
508
509 /*
510  * release a reference to a region
511  * - the caller must hold the region semaphore for writing, which this releases
512  * - the region may not have been added to the tree yet, in which case vm_top
513  *   will equal vm_start
514  */
515 static void __put_nommu_region(struct vm_region *region)
516         __releases(nommu_region_sem)
517 {
518         BUG_ON(!nommu_region_tree.rb_node);
519
520         if (--region->vm_usage == 0) {
521                 if (region->vm_top > region->vm_start)
522                         delete_nommu_region(region);
523                 up_write(&nommu_region_sem);
524
525                 if (region->vm_file)
526                         fput(region->vm_file);
527
528                 /* IO memory and memory shared directly out of the pagecache
529                  * from ramfs/tmpfs mustn't be released here */
530                 if (region->vm_flags & VM_MAPPED_COPY)
531                         free_page_series(region->vm_start, region->vm_top);
532                 kmem_cache_free(vm_region_jar, region);
533         } else {
534                 up_write(&nommu_region_sem);
535         }
536 }
537
538 /*
539  * release a reference to a region
540  */
541 static void put_nommu_region(struct vm_region *region)
542 {
543         down_write(&nommu_region_sem);
544         __put_nommu_region(region);
545 }
546
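/*
 * Store or erase a VMA's [vm_start, vm_end) range in the maple tree using a
 * maple state whose node allocations have already been preallocated.
 */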
547 void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
548 {
549         mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
550         mas_store_prealloc(mas, vma);
551 }
552
553 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
554 {
555         mas->index = vma->vm_start;
556         mas->last = vma->vm_end - 1;
557         mas_store_prealloc(mas, NULL);
558 }
559
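/*
 * Associate a VMA with an mm and, for file mappings, insert it into the
 * file's i_mmap interval tree.
 */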
560 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
561 {
562         mm->map_count++;
563         vma->vm_mm = mm;
564
565         /* add the VMA to the mapping */
566         if (vma->vm_file) {
567                 struct address_space *mapping = vma->vm_file->f_mapping;
568
569                 i_mmap_lock_write(mapping);
570                 flush_dcache_mmap_lock(mapping);
571                 vma_interval_tree_insert(vma, &mapping->i_mmap);
572                 flush_dcache_mmap_unlock(mapping);
573                 i_mmap_unlock_write(mapping);
574         }
575 }
576
577 /*
578  * mas_add_vma_to_mm() - Maple state variant of add_vma_to_mm().
579  * @mas: The maple state with preallocations.
580  * @mm: The mm_struct
581  * @vma: The vma to add
582  *
583  */
584 static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
585                               struct vm_area_struct *vma)
586 {
587         BUG_ON(!vma->vm_region);
588
589         setup_vma_to_mm(vma, mm);
590
591         /* add the VMA to the tree */
592         vma_mas_store(vma, mas);
593 }
594
595 /*
596  * add a VMA into a process's mm_struct in the appropriate place in the tree
597  * and, if it is file-backed rather than anonymous, into the address space's
598  * i_mmap tree as well
599  * - should be called with mm->mmap_lock held writelocked
600  */
601 static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
602 {
603         MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
604
605         if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
606                 pr_warn("Allocation of vma tree for process %d failed\n",
607                        current->pid);
608                 return -ENOMEM;
609         }
610         mas_add_vma_to_mm(&mas, mm, vma);
611         return 0;
612 }
613
614 static void cleanup_vma_from_mm(struct vm_area_struct *vma)
615 {
616         vma->vm_mm->map_count--;
617         /* remove the VMA from the mapping */
618         if (vma->vm_file) {
619                 struct address_space *mapping;
620                 mapping = vma->vm_file->f_mapping;
621
622                 i_mmap_lock_write(mapping);
623                 flush_dcache_mmap_lock(mapping);
624                 vma_interval_tree_remove(vma, &mapping->i_mmap);
625                 flush_dcache_mmap_unlock(mapping);
626                 i_mmap_unlock_write(mapping);
627         }
628 }
629 /*
630  * delete a VMA from its owning mm_struct and address space
631  */
632 static int delete_vma_from_mm(struct vm_area_struct *vma)
633 {
634         MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
635
636         if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
637                 pr_warn("Allocation of vma tree for process %d failed\n",
638                        current->pid);
639                 return -ENOMEM;
640         }
641         cleanup_vma_from_mm(vma);
642
643         /* remove from the MM's tree and list */
644         vma_mas_remove(vma, &mas);
645         return 0;
646 }
647
648 /*
649  * destroy a VMA record
650  */
651 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
652 {
653         if (vma->vm_ops && vma->vm_ops->close)
654                 vma->vm_ops->close(vma);
655         if (vma->vm_file)
656                 fput(vma->vm_file);
657         put_nommu_region(vma->vm_region);
658         vm_area_free(vma);
659 }
660
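/*
 * Return the first VMA intersecting [start_addr, end_addr), or NULL if there
 * is none; the caller must hold mmap_lock.
 */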
661 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
662                                              unsigned long start_addr,
663                                              unsigned long end_addr)
664 {
665         unsigned long index = start_addr;
666
667         mmap_assert_locked(mm);
668         return mt_find(&mm->mm_mt, &index, end_addr - 1);
669 }
670 EXPORT_SYMBOL(find_vma_intersection);
671
672 /*
673  * look up the first VMA in which addr resides, NULL if none
674  * - should be called with mm->mmap_lock at least held readlocked
675  */
676 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
677 {
678         MA_STATE(mas, &mm->mm_mt, addr, addr);
679
680         return mas_walk(&mas);
681 }
682 EXPORT_SYMBOL(find_vma);
683
684 /*
685  * find a VMA
686  * - we don't extend stack VMAs under NOMMU conditions
687  */
688 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
689 {
690         return find_vma(mm, addr);
691 }
692
693 /*
694  * expand a stack to a given address
695  * - not supported under NOMMU conditions
696  */
697 int expand_stack(struct vm_area_struct *vma, unsigned long address)
698 {
699         return -ENOMEM;
700 }
701
702 /*
703  * look up the first VMA that exactly matches addr
704  * - should be called with mm->mmap_lock at least held readlocked
705  */
706 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
707                                              unsigned long addr,
708                                              unsigned long len)
709 {
710         struct vm_area_struct *vma;
711         unsigned long end = addr + len;
712         MA_STATE(mas, &mm->mm_mt, addr, addr);
713
714         vma = mas_walk(&mas);
715         if (!vma)
716                 return NULL;
717         if (vma->vm_start != addr)
718                 return NULL;
719         if (vma->vm_end != end)
720                 return NULL;
721
722         return vma;
723 }
724
725 /*
726  * determine whether a mapping should be permitted and, if so, what sort of
727  * mapping we're capable of supporting
728  */
729 static int validate_mmap_request(struct file *file,
730                                  unsigned long addr,
731                                  unsigned long len,
732                                  unsigned long prot,
733                                  unsigned long flags,
734                                  unsigned long pgoff,
735                                  unsigned long *_capabilities)
736 {
737         unsigned long capabilities, rlen;
738         int ret;
739
740         /* do the simple checks first */
741         if (flags & MAP_FIXED)
742                 return -EINVAL;
743
744         if ((flags & MAP_TYPE) != MAP_PRIVATE &&
745             (flags & MAP_TYPE) != MAP_SHARED)
746                 return -EINVAL;
747
748         if (!len)
749                 return -EINVAL;
750
751         /* Careful about overflows.. */
752         rlen = PAGE_ALIGN(len);
753         if (!rlen || rlen > TASK_SIZE)
754                 return -ENOMEM;
755
756         /* offset overflow? */
757         if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
758                 return -EOVERFLOW;
759
760         if (file) {
761                 /* files must support mmap */
762                 if (!file->f_op->mmap)
763                         return -ENODEV;
764
765                 /* work out if what we've got could possibly be shared
766                  * - we support chardevs that provide their own "memory"
767                  * - we support files/blockdevs that are memory backed
768                  */
769                 if (file->f_op->mmap_capabilities) {
770                         capabilities = file->f_op->mmap_capabilities(file);
771                 } else {
772                         /* no explicit capabilities set, so assume some
773                          * defaults */
774                         switch (file_inode(file)->i_mode & S_IFMT) {
775                         case S_IFREG:
776                         case S_IFBLK:
777                                 capabilities = NOMMU_MAP_COPY;
778                                 break;
779
780                         case S_IFCHR:
781                                 capabilities =
782                                         NOMMU_MAP_DIRECT |
783                                         NOMMU_MAP_READ |
784                                         NOMMU_MAP_WRITE;
785                                 break;
786
787                         default:
788                                 return -EINVAL;
789                         }
790                 }
791
792                 /* eliminate any capabilities that we can't support on this
793                  * device */
794                 if (!file->f_op->get_unmapped_area)
795                         capabilities &= ~NOMMU_MAP_DIRECT;
796                 if (!(file->f_mode & FMODE_CAN_READ))
797                         capabilities &= ~NOMMU_MAP_COPY;
798
799                 /* The file shall have been opened with read permission. */
800                 if (!(file->f_mode & FMODE_READ))
801                         return -EACCES;
802
803                 if (flags & MAP_SHARED) {
804                         /* do checks for writing, appending and locking */
805                         if ((prot & PROT_WRITE) &&
806                             !(file->f_mode & FMODE_WRITE))
807                                 return -EACCES;
808
809                         if (IS_APPEND(file_inode(file)) &&
810                             (file->f_mode & FMODE_WRITE))
811                                 return -EACCES;
812
813                         if (!(capabilities & NOMMU_MAP_DIRECT))
814                                 return -ENODEV;
815
816                         /* we mustn't privatise shared mappings */
817                         capabilities &= ~NOMMU_MAP_COPY;
818                 } else {
819                         /* we're going to read the file into private memory we
820                          * allocate */
821                         if (!(capabilities & NOMMU_MAP_COPY))
822                                 return -ENODEV;
823
824                         /* we don't permit a private writable mapping to be
825                          * shared with the backing device */
826                         if (prot & PROT_WRITE)
827                                 capabilities &= ~NOMMU_MAP_DIRECT;
828                 }
829
830                 if (capabilities & NOMMU_MAP_DIRECT) {
831                         if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
832                             ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
833                             ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
834                             ) {
835                                 capabilities &= ~NOMMU_MAP_DIRECT;
836                                 if (flags & MAP_SHARED) {
837                                         pr_warn("MAP_SHARED not completely supported on !MMU\n");
838                                         return -EINVAL;
839                                 }
840                         }
841                 }
842
843                 /* handle executable mappings and implied executable
844                  * mappings */
845                 if (path_noexec(&file->f_path)) {
846                         if (prot & PROT_EXEC)
847                                 return -EPERM;
848                 } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
849                         /* handle implication of PROT_EXEC by PROT_READ */
850                         if (current->personality & READ_IMPLIES_EXEC) {
851                                 if (capabilities & NOMMU_MAP_EXEC)
852                                         prot |= PROT_EXEC;
853                         }
854                 } else if ((prot & PROT_READ) &&
855                          (prot & PROT_EXEC) &&
856                          !(capabilities & NOMMU_MAP_EXEC)
857                          ) {
858                         /* backing file is not executable, try to copy */
859                         capabilities &= ~NOMMU_MAP_DIRECT;
860                 }
861         } else {
862                 /* anonymous mappings are always memory backed and can be
863                  * privately mapped
864                  */
865                 capabilities = NOMMU_MAP_COPY;
866
867                 /* handle PROT_EXEC implication by PROT_READ */
868                 if ((prot & PROT_READ) &&
869                     (current->personality & READ_IMPLIES_EXEC))
870                         prot |= PROT_EXEC;
871         }
872
873         /* allow the security API to have its say */
874         ret = security_mmap_addr(addr);
875         if (ret < 0)
876                 return ret;
877
878         /* looks okay */
879         *_capabilities = capabilities;
880         return 0;
881 }
882
883 /*
884  * we've determined that we can make the mapping, now translate what we
885  * now know into VMA flags
886  */
887 static unsigned long determine_vm_flags(struct file *file,
888                                         unsigned long prot,
889                                         unsigned long flags,
890                                         unsigned long capabilities)
891 {
892         unsigned long vm_flags;
893
894         vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
895         /* vm_flags |= mm->def_flags; */
896
897         if (!(capabilities & NOMMU_MAP_DIRECT)) {
898                 /* attempt to share read-only copies of mapped file chunks */
899                 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
900                 if (file && !(prot & PROT_WRITE))
901                         vm_flags |= VM_MAYSHARE;
902         } else {
903                 /* overlay a shareable mapping on the backing device or inode
904                  * if possible - used for chardevs, ramfs/tmpfs/shmfs and
905                  * romfs/cramfs */
906                 vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
907                 if (flags & MAP_SHARED)
908                         vm_flags |= VM_SHARED;
909         }
910
911         /* refuse to let anyone share private mappings with this process if
912          * it's being traced - otherwise breakpoints set in it may interfere
913          * with another untraced process
914          */
915         if ((flags & MAP_PRIVATE) && current->ptrace)
916                 vm_flags &= ~VM_MAYSHARE;
917
918         return vm_flags;
919 }
920
921 /*
922  * set up a shared mapping on a file (the driver or filesystem provides and
923  * pins the storage)
924  */
925 static int do_mmap_shared_file(struct vm_area_struct *vma)
926 {
927         int ret;
928
929         ret = call_mmap(vma->vm_file, vma);
930         if (ret == 0) {
931                 vma->vm_region->vm_top = vma->vm_region->vm_end;
932                 return 0;
933         }
934         if (ret != -ENOSYS)
935                 return ret;
936
937         /* getting -ENOSYS indicates that direct mmap isn't possible (as
938          * opposed to tried but failed) so we can only give a suitable error as
939          * it's not possible to make a private copy if MAP_SHARED was given */
940         return -ENODEV;
941 }
942
943 /*
944  * set up a private mapping or an anonymous shared mapping
945  */
946 static int do_mmap_private(struct vm_area_struct *vma,
947                            struct vm_region *region,
948                            unsigned long len,
949                            unsigned long capabilities)
950 {
951         unsigned long total, point;
952         void *base;
953         int ret, order;
954
955         /* invoke the file's mapping function so that it can keep track of
956          * shared mappings on devices or memory
957          * - VM_MAYSHARE will be set if it may attempt to share
958          */
959         if (capabilities & NOMMU_MAP_DIRECT) {
960                 ret = call_mmap(vma->vm_file, vma);
961                 if (ret == 0) {
962                         /* shouldn't return success if we're not sharing */
963                         BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
964                         vma->vm_region->vm_top = vma->vm_region->vm_end;
965                         return 0;
966                 }
967                 if (ret != -ENOSYS)
968                         return ret;
969
970                 /* getting an ENOSYS error indicates that direct mmap isn't
971                  * possible (as opposed to tried but failed) so we'll try to
972                  * make a private copy of the data and map that instead */
973         }
974
975
976         /* allocate some memory to hold the mapping
977          * - note that this may not return a page-aligned address if the object
978          *   we're allocating is smaller than a page
979          */
980         order = get_order(len);
981         total = 1 << order;
982         point = len >> PAGE_SHIFT;
983
984         /* we don't want to allocate a power-of-2 sized page set */
985         if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
986                 total = point;
987
988         base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
989         if (!base)
990                 goto enomem;
991
992         atomic_long_add(total, &mmap_pages_allocated);
993
994         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
995         region->vm_start = (unsigned long) base;
996         region->vm_end   = region->vm_start + len;
997         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
998
999         vma->vm_start = region->vm_start;
1000         vma->vm_end   = region->vm_start + len;
1001
1002         if (vma->vm_file) {
1003                 /* read the contents of a file into the copy */
1004                 loff_t fpos;
1005
1006                 fpos = vma->vm_pgoff;
1007                 fpos <<= PAGE_SHIFT;
1008
1009                 ret = kernel_read(vma->vm_file, base, len, &fpos);
1010                 if (ret < 0)
1011                         goto error_free;
1012
1013                 /* clear the last little bit */
1014                 if (ret < len)
1015                         memset(base + ret, 0, len - ret);
1016
1017         } else {
1018                 vma_set_anonymous(vma);
1019         }
1020
1021         return 0;
1022
1023 error_free:
1024         free_page_series(region->vm_start, region->vm_top);
1025         region->vm_start = vma->vm_start = 0;
1026         region->vm_end   = vma->vm_end = 0;
1027         region->vm_top   = 0;
1028         return ret;
1029
1030 enomem:
1031         pr_err("Allocation of length %lu from process %d (%s) failed\n",
1032                len, current->pid, current->comm);
1033         show_free_areas(0, NULL);
1034         return -ENOMEM;
1035 }
1036
1037 /*
1038  * handle mapping creation for uClinux
1039  */
1040 unsigned long do_mmap(struct file *file,
1041                         unsigned long addr,
1042                         unsigned long len,
1043                         unsigned long prot,
1044                         unsigned long flags,
1045                         unsigned long pgoff,
1046                         unsigned long *populate,
1047                         struct list_head *uf)
1048 {
1049         struct vm_area_struct *vma;
1050         struct vm_region *region;
1051         struct rb_node *rb;
1052         vm_flags_t vm_flags;
1053         unsigned long capabilities, result;
1054         int ret;
1055         MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1056
1057         *populate = 0;
1058
1059         /* decide whether we should attempt the mapping, and if so what sort of
1060          * mapping */
1061         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1062                                     &capabilities);
1063         if (ret < 0)
1064                 return ret;
1065
1066         /* we ignore the address hint */
1067         addr = 0;
1068         len = PAGE_ALIGN(len);
1069
1070         /* we've determined that we can make the mapping, now translate what we
1071          * now know into VMA flags */
1072         vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1073
1074
1075         /* we're going to need to record the mapping */
1076         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1077         if (!region)
1078                 goto error_getting_region;
1079
1080         vma = vm_area_alloc(current->mm);
1081         if (!vma)
1082                 goto error_getting_vma;
1083
1084         if (mas_preallocate(&mas, vma, GFP_KERNEL))
1085                 goto error_maple_preallocate;
1086
1087         region->vm_usage = 1;
1088         region->vm_flags = vm_flags;
1089         region->vm_pgoff = pgoff;
1090
1091         vma->vm_flags = vm_flags;
1092         vma->vm_pgoff = pgoff;
1093
1094         if (file) {
1095                 region->vm_file = get_file(file);
1096                 vma->vm_file = get_file(file);
1097         }
1098
1099         down_write(&nommu_region_sem);
1100
1101         /* if we want to share, we need to check for regions created by other
1102          * mmap() calls that overlap with our proposed mapping
1103          * - we can only share with a superset match on most regular files
1104          * - shared mappings on character devices and memory backed files are
1105          *   permitted to overlap inexactly as far as we are concerned for in
1106          *   these cases, sharing is handled in the driver or filesystem rather
1107          *   than here
1108          */
1109         if (vm_flags & VM_MAYSHARE) {
1110                 struct vm_region *pregion;
1111                 unsigned long pglen, rpglen, pgend, rpgend, start;
1112
1113                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1114                 pgend = pgoff + pglen;
1115
1116                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1117                         pregion = rb_entry(rb, struct vm_region, vm_rb);
1118
1119                         if (!(pregion->vm_flags & VM_MAYSHARE))
1120                                 continue;
1121
1122                         /* search for overlapping mappings on the same file */
1123                         if (file_inode(pregion->vm_file) !=
1124                             file_inode(file))
1125                                 continue;
1126
1127                         if (pregion->vm_pgoff >= pgend)
1128                                 continue;
1129
1130                         rpglen = pregion->vm_end - pregion->vm_start;
1131                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1132                         rpgend = pregion->vm_pgoff + rpglen;
1133                         if (pgoff >= rpgend)
1134                                 continue;
1135
1136                         /* handle inexactly overlapping matches between
1137                          * mappings */
1138                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1139                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1140                                 /* new mapping is not a subset of the region */
1141                                 if (!(capabilities & NOMMU_MAP_DIRECT))
1142                                         goto sharing_violation;
1143                                 continue;
1144                         }
1145
1146                         /* we've found a region we can share */
1147                         pregion->vm_usage++;
1148                         vma->vm_region = pregion;
1149                         start = pregion->vm_start;
1150                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1151                         vma->vm_start = start;
1152                         vma->vm_end = start + len;
1153
1154                         if (pregion->vm_flags & VM_MAPPED_COPY)
1155                                 vma->vm_flags |= VM_MAPPED_COPY;
1156                         else {
1157                                 ret = do_mmap_shared_file(vma);
1158                                 if (ret < 0) {
1159                                         vma->vm_region = NULL;
1160                                         vma->vm_start = 0;
1161                                         vma->vm_end = 0;
1162                                         pregion->vm_usage--;
1163                                         pregion = NULL;
1164                                         goto error_just_free;
1165                                 }
1166                         }
1167                         fput(region->vm_file);
1168                         kmem_cache_free(vm_region_jar, region);
1169                         region = pregion;
1170                         result = start;
1171                         goto share;
1172                 }
1173
1174                 /* obtain the address at which to make a shared mapping
1175                  * - this is the hook for quasi-memory character devices to
1176                  *   tell us the location of a shared mapping
1177                  */
1178                 if (capabilities & NOMMU_MAP_DIRECT) {
1179                         addr = file->f_op->get_unmapped_area(file, addr, len,
1180                                                              pgoff, flags);
1181                         if (IS_ERR_VALUE(addr)) {
1182                                 ret = addr;
1183                                 if (ret != -ENOSYS)
1184                                         goto error_just_free;
1185
1186                                 /* the driver refused to tell us where to site
1187                                  * the mapping so we'll have to attempt to copy
1188                                  * it */
1189                                 ret = -ENODEV;
1190                                 if (!(capabilities & NOMMU_MAP_COPY))
1191                                         goto error_just_free;
1192
1193                                 capabilities &= ~NOMMU_MAP_DIRECT;
1194                         } else {
1195                                 vma->vm_start = region->vm_start = addr;
1196                                 vma->vm_end = region->vm_end = addr + len;
1197                         }
1198                 }
1199         }
1200
1201         vma->vm_region = region;
1202
1203         /* set up the mapping
1204          * - the region is filled in if NOMMU_MAP_DIRECT is still set
1205          */
1206         if (file && vma->vm_flags & VM_SHARED)
1207                 ret = do_mmap_shared_file(vma);
1208         else
1209                 ret = do_mmap_private(vma, region, len, capabilities);
1210         if (ret < 0)
1211                 goto error_just_free;
1212         add_nommu_region(region);
1213
1214         /* clear anonymous mappings that don't ask for uninitialized data */
1215         if (!vma->vm_file &&
1216             (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1217              !(flags & MAP_UNINITIALIZED)))
1218                 memset((void *)region->vm_start, 0,
1219                        region->vm_end - region->vm_start);
1220
1221         /* okay... we have a mapping; now we have to register it */
1222         result = vma->vm_start;
1223
1224         current->mm->total_vm += len >> PAGE_SHIFT;
1225
1226 share:
1227         mas_add_vma_to_mm(&mas, current->mm, vma);
1228
1229         /* we flush the region from the icache only when the first executable
1230          * mapping of it is made  */
1231         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1232                 flush_icache_user_range(region->vm_start, region->vm_end);
1233                 region->vm_icache_flushed = true;
1234         }
1235
1236         up_write(&nommu_region_sem);
1237
1238         return result;
1239
1240 error_just_free:
1241         up_write(&nommu_region_sem);
1242 error:
1243         if (region->vm_file)
1244                 fput(region->vm_file);
1245         kmem_cache_free(vm_region_jar, region);
1246         if (vma->vm_file)
1247                 fput(vma->vm_file);
1248         vm_area_free(vma);
1249         return ret;
1250
1251 sharing_violation:
1252         up_write(&nommu_region_sem);
1253         mas_destroy(&mas);
1254         pr_warn("Attempt to share mismatched mappings\n");
1255         ret = -EINVAL;
1256         goto error;
1257
1258 error_getting_vma:
1259         kmem_cache_free(vm_region_jar, region);
1260         pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1261                         len, current->pid);
1262         show_free_areas(0, NULL);
1263         return -ENOMEM;
1264
1265 error_getting_region:
1266         pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1267                         len, current->pid);
1268         show_free_areas(0, NULL);
1269         return -ENOMEM;
1270
1271 error_maple_preallocate:
1272         kmem_cache_free(vm_region_jar, region);
1273         vm_area_free(vma);
1274         pr_warn("Allocation of vma tree for process %d failed\n", current->pid);
1275         show_free_areas(0, NULL);
1276         return -ENOMEM;
1277
1278 }
1279
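/*
 * Common entry point for the mmap_pgoff system calls: resolve the file
 * descriptor (unless MAP_ANONYMOUS) and hand off to vm_mmap_pgoff().
 */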
1280 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1281                               unsigned long prot, unsigned long flags,
1282                               unsigned long fd, unsigned long pgoff)
1283 {
1284         struct file *file = NULL;
1285         unsigned long retval = -EBADF;
1286
1287         audit_mmap_fd(fd, flags);
1288         if (!(flags & MAP_ANONYMOUS)) {
1289                 file = fget(fd);
1290                 if (!file)
1291                         goto out;
1292         }
1293
1294         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1295
1296         if (file)
1297                 fput(file);
1298 out:
1299         return retval;
1300 }
1301
1302 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1303                 unsigned long, prot, unsigned long, flags,
1304                 unsigned long, fd, unsigned long, pgoff)
1305 {
1306         return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1307 }
1308
1309 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1310 struct mmap_arg_struct {
1311         unsigned long addr;
1312         unsigned long len;
1313         unsigned long prot;
1314         unsigned long flags;
1315         unsigned long fd;
1316         unsigned long offset;
1317 };
1318
1319 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1320 {
1321         struct mmap_arg_struct a;
1322
1323         if (copy_from_user(&a, arg, sizeof(a)))
1324                 return -EFAULT;
1325         if (offset_in_page(a.offset))
1326                 return -EINVAL;
1327
1328         return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1329                                a.offset >> PAGE_SHIFT);
1330 }
1331 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1332
1333 /*
1334  * split a vma into two pieces at address 'addr'; a new vma is allocated for
1335  * either the first part or the tail.
1336  */
1337 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1338               unsigned long addr, int new_below)
1339 {
1340         struct vm_area_struct *new;
1341         struct vm_region *region;
1342         unsigned long npages;
1343         MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
1344
1345         /* we're only permitted to split anonymous regions (these should have
1346          * only a single usage on the region) */
1347         if (vma->vm_file)
1348                 return -ENOMEM;
1349
1350         if (mm->map_count >= sysctl_max_map_count)
1351                 return -ENOMEM;
1352
1353         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1354         if (!region)
1355                 return -ENOMEM;
1356
1357         new = vm_area_dup(vma);
1358         if (!new)
1359                 goto err_vma_dup;
1360
1361         if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
1362                 pr_warn("Allocation of vma tree for process %d failed\n",
1363                         current->pid);
1364                 goto err_mas_preallocate;
1365         }
1366
1367         /* most fields are the same, copy all, and then fixup */
1368         *region = *vma->vm_region;
1369         new->vm_region = region;
1370
1371         npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1372
1373         if (new_below) {
1374                 region->vm_top = region->vm_end = new->vm_end = addr;
1375         } else {
1376                 region->vm_start = new->vm_start = addr;
1377                 region->vm_pgoff = new->vm_pgoff += npages;
1378         }
1379
1380         if (new->vm_ops && new->vm_ops->open)
1381                 new->vm_ops->open(new);
1382
1383         down_write(&nommu_region_sem);
1384         delete_nommu_region(vma->vm_region);
1385         if (new_below) {
1386                 vma->vm_region->vm_start = vma->vm_start = addr;
1387                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1388         } else {
1389                 vma->vm_region->vm_end = vma->vm_end = addr;
1390                 vma->vm_region->vm_top = addr;
1391         }
1392         add_nommu_region(vma->vm_region);
1393         add_nommu_region(new->vm_region);
1394         up_write(&nommu_region_sem);
1395
1396         setup_vma_to_mm(vma, mm);
1397         setup_vma_to_mm(new, mm);
1398         mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
1399         mas_store(&mas, vma);
1400         vma_mas_store(new, &mas);
1401         return 0;
1402
1403 err_mas_preallocate:
1404         vm_area_free(new);
1405 err_vma_dup:
1406         kmem_cache_free(vm_region_jar, region);
1407         return -ENOMEM;
1408 }
1409
1410 /*
1411  * shrink a VMA by removing the specified chunk from either the beginning or
1412  * the end
1413  */
1414 static int shrink_vma(struct mm_struct *mm,
1415                       struct vm_area_struct *vma,
1416                       unsigned long from, unsigned long to)
1417 {
1418         struct vm_region *region;
1419
1420         /* adjust the VMA's pointers, which may reposition it in the MM's tree
1421          * and list */
1422         if (delete_vma_from_mm(vma))
1423                 return -ENOMEM;
1424         if (from > vma->vm_start)
1425                 vma->vm_end = from;
1426         else
1427                 vma->vm_start = to;
1428         if (add_vma_to_mm(mm, vma))
1429                 return -ENOMEM;
1430
1431         /* cut the backing region down to size */
1432         region = vma->vm_region;
1433         BUG_ON(region->vm_usage != 1);
1434
1435         down_write(&nommu_region_sem);
1436         delete_nommu_region(region);
1437         if (from > region->vm_start) {
1438                 to = region->vm_top;
1439                 region->vm_top = region->vm_end = from;
1440         } else {
1441                 region->vm_start = to;
1442         }
1443         add_nommu_region(region);
1444         up_write(&nommu_region_sem);
1445
1446         free_page_series(from, to);
1447         return 0;
1448 }
1449
1450 /*
1451  * release a mapping
1452  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1453  *   VMA, though it need not cover the whole VMA
1454  */
1455 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1456 {
1457         MA_STATE(mas, &mm->mm_mt, start, start);
1458         struct vm_area_struct *vma;
1459         unsigned long end;
1460         int ret = 0;
1461
1462         len = PAGE_ALIGN(len);
1463         if (len == 0)
1464                 return -EINVAL;
1465
1466         end = start + len;
1467
1468         /* find the first potentially overlapping VMA */
1469         vma = mas_find(&mas, end - 1);
1470         if (!vma) {
1471                 static int limit;
1472                 if (limit < 5) {
1473                         pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1474                                         current->pid, current->comm,
1475                                         start, start + len - 1);
1476                         limit++;
1477                 }
1478                 return -EINVAL;
1479         }
1480
1481         /* we're allowed to split an anonymous VMA but not a file-backed one */
1482         if (vma->vm_file) {
1483                 do {
1484                         if (start > vma->vm_start)
1485                                 return -EINVAL;
1486                         if (end == vma->vm_end)
1487                                 goto erase_whole_vma;
1488                         vma = mas_next(&mas, end - 1);
1489                 } while (vma);
1490                 return -EINVAL;
1491         } else {
1492                 /* the chunk must be a subset of the VMA found */
1493                 if (start == vma->vm_start && end == vma->vm_end)
1494                         goto erase_whole_vma;
1495                 if (start < vma->vm_start || end > vma->vm_end)
1496                         return -EINVAL;
1497                 if (offset_in_page(start))
1498                         return -EINVAL;
1499                 if (end != vma->vm_end && offset_in_page(end))
1500                         return -EINVAL;
1501                 if (start != vma->vm_start && end != vma->vm_end) {
1502                         ret = split_vma(mm, vma, start, 1);
1503                         if (ret < 0)
1504                                 return ret;
1505                 }
1506                 return shrink_vma(mm, vma, start, end);
1507         }
1508
1509 erase_whole_vma:
1510         if (delete_vma_from_mm(vma))
1511                 ret = -ENOMEM;
1512         delete_vma(mm, vma);
1513         return ret;
1514 }
1515
1516 int vm_munmap(unsigned long addr, size_t len)
1517 {
1518         struct mm_struct *mm = current->mm;
1519         int ret;
1520
1521         mmap_write_lock(mm);
1522         ret = do_munmap(mm, addr, len, NULL);
1523         mmap_write_unlock(mm);
1524         return ret;
1525 }
1526 EXPORT_SYMBOL(vm_munmap);
1527
1528 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1529 {
1530         return vm_munmap(addr, len);
1531 }
1532
1533 /*
1534  * release all the mappings made in a process's VM space
1535  */
1536 void exit_mmap(struct mm_struct *mm)
1537 {
1538         VMA_ITERATOR(vmi, mm, 0);
1539         struct vm_area_struct *vma;
1540
1541         if (!mm)
1542                 return;
1543
1544         mm->total_vm = 0;
1545
1546         /*
1547          * Lock the mm to keep the locking assertions from complaining even
1548          * though this is the only user of the mm
1549          */
1550         mmap_write_lock(mm);
1551         for_each_vma(vmi, vma) {
1552                 cleanup_vma_from_mm(vma);
1553                 delete_vma(mm, vma);
1554                 cond_resched();
1555         }
1556         __mt_destroy(&mm->mm_mt);
1557         mmap_write_unlock(mm);
1558 }
1559
1560 int vm_brk(unsigned long addr, unsigned long len)
1561 {
1562         return -ENOMEM;
1563 }
1564
1565 /*
1566  * expand (or shrink) an existing mapping, potentially moving it at the same
1567  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1568  *
1569  * under NOMMU conditions, we only permit changing a mapping's size, and only
1570  * as long as it stays within the region allocated by do_mmap_private() and the
1571  * block is not shareable
1572  *
1573  * MREMAP_FIXED is not supported under NOMMU conditions
1574  */
1575 static unsigned long do_mremap(unsigned long addr,
1576                         unsigned long old_len, unsigned long new_len,
1577                         unsigned long flags, unsigned long new_addr)
1578 {
1579         struct vm_area_struct *vma;
1580
1581         /* insanity checks first */
1582         old_len = PAGE_ALIGN(old_len);
1583         new_len = PAGE_ALIGN(new_len);
1584         if (old_len == 0 || new_len == 0)
1585                 return (unsigned long) -EINVAL;
1586
1587         if (offset_in_page(addr))
1588                 return -EINVAL;
1589
1590         if (flags & MREMAP_FIXED && new_addr != addr)
1591                 return (unsigned long) -EINVAL;
1592
1593         vma = find_vma_exact(current->mm, addr, old_len);
1594         if (!vma)
1595                 return (unsigned long) -EINVAL;
1596
1597         if (vma->vm_end != vma->vm_start + old_len)
1598                 return (unsigned long) -EFAULT;
1599
1600         if (vma->vm_flags & VM_MAYSHARE)
1601                 return (unsigned long) -EPERM;
1602
1603         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1604                 return (unsigned long) -ENOMEM;
1605
1606         /* all checks complete - do it */
1607         vma->vm_end = vma->vm_start + new_len;
1608         return vma->vm_start;
1609 }
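/*
 * Hypothetical userspace sketch (not from the original source), assuming a
 * private anonymous mapping whose backing region was over-allocated by
 * do_mmap_private():
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p = mremap(p, 8192, 16384, 0);		// grows in place if the region has room
 *	p = mremap(p, 16384, 32768, MREMAP_MAYMOVE);	// still never relocated here
 *
 * The mapping is never moved; the call fails with -ENOMEM unless the new
 * length fits inside the existing vm_region.
 */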
1610
1611 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1612                 unsigned long, new_len, unsigned long, flags,
1613                 unsigned long, new_addr)
1614 {
1615         unsigned long ret;
1616
1617         mmap_write_lock(current->mm);
1618         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1619         mmap_write_unlock(current->mm);
1620         return ret;
1621 }
1622
1623 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1624                          unsigned int foll_flags)
1625 {
1626         return NULL;
1627 }
1628
1629 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1630                 unsigned long pfn, unsigned long size, pgprot_t prot)
1631 {
1632         if (addr != (pfn << PAGE_SHIFT))
1633                 return -EINVAL;
1634
1635         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1636         return 0;
1637 }
1638 EXPORT_SYMBOL(remap_pfn_range);
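/*
 * Sketch of the usual caller pattern (names hypothetical), e.g. from a
 * driver ->mmap handler; under NOMMU the call above only succeeds when the
 * requested user address already equals the physical address being mapped:
 *
 *	return remap_pfn_range(vma, vma->vm_start,
 *			       dev->regs_phys >> PAGE_SHIFT,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */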
1639
1640 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1641 {
1642         unsigned long pfn = start >> PAGE_SHIFT;
1643         unsigned long vm_len = vma->vm_end - vma->vm_start;
1644
1645         pfn += vma->vm_pgoff;
1646         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1647 }
1648 EXPORT_SYMBOL(vm_iomap_memory);
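/*
 * Minimal sketch of a driver ->mmap handler using the helper above;
 * "foo_device" and its fields are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *foo = file->private_data;
 *
 *		return vm_iomap_memory(vma, foo->regs_phys, foo->regs_len);
 *	}
 */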
1649
1650 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1651                         unsigned long pgoff)
1652 {
1653         unsigned int size = vma->vm_end - vma->vm_start;
1654
1655         if (!(vma->vm_flags & VM_USERMAP))
1656                 return -EINVAL;
1657
1658         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1659         vma->vm_end = vma->vm_start + size;
1660
1661         return 0;
1662 }
1663 EXPORT_SYMBOL(remap_vmalloc_range);
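/*
 * Typical use is again a driver ->mmap handler, exposing a buffer that was
 * allocated with vmalloc_user(); "foo_buf" is hypothetical.  Under NOMMU
 * this simply repoints the VMA at the kernel buffer rather than building
 * page tables:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */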
1664
1665 vm_fault_t filemap_fault(struct vm_fault *vmf)
1666 {
1667         BUG();
1668         return 0;
1669 }
1670 EXPORT_SYMBOL(filemap_fault);
1671
1672 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1673                 pgoff_t start_pgoff, pgoff_t end_pgoff)
1674 {
1675         BUG();
1676         return 0;
1677 }
1678 EXPORT_SYMBOL(filemap_map_pages);
1679
1680 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
1681                        int len, unsigned int gup_flags)
1682 {
1683         struct vm_area_struct *vma;
1684         int write = gup_flags & FOLL_WRITE;
1685
1686         if (mmap_read_lock_killable(mm))
1687                 return 0;
1688
1689         /* the access must start within one of the target process's mappings */
1690         vma = find_vma(mm, addr);
1691         if (vma) {
1692                 /* don't overrun this mapping */
1693                 if (addr + len >= vma->vm_end)
1694                         len = vma->vm_end - addr;
1695
1696                 /* only read or write mappings where it is permitted */
1697                 if (write && vma->vm_flags & VM_MAYWRITE)
1698                         copy_to_user_page(vma, NULL, addr,
1699                                          (void *) addr, buf, len);
1700                 else if (!write && vma->vm_flags & VM_MAYREAD)
1701                         copy_from_user_page(vma, NULL, addr,
1702                                             buf, (void *) addr, len);
1703                 else
1704                         len = 0;
1705         } else {
1706                 len = 0;
1707         }
1708
1709         mmap_read_unlock(mm);
1710
1711         return len;
1712 }
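/*
 * Note (illustrative): with no MMU the target addresses are directly
 * addressable from the kernel, so copy_to_user_page()/copy_from_user_page()
 * above amount to a memcpy() plus whatever cache maintenance the
 * architecture requires; no page pinning or temporary mapping is involved.
 */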
1713
1714 /**
1715  * access_remote_vm - access another process' address space
1716  * @mm:         the mm_struct of the target address space
1717  * @addr:       start address to access
1718  * @buf:        source or destination buffer
1719  * @len:        number of bytes to transfer
1720  * @gup_flags:  flags modifying lookup behaviour
1721  *
1722  * The caller must hold a reference on @mm.
1723  */
1724 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1725                 void *buf, int len, unsigned int gup_flags)
1726 {
1727         return __access_remote_vm(mm, addr, buf, len, gup_flags);
1728 }
1729
1730 /*
1731  * Access another process' address space.
1732  * - source/target buffer must be kernel space
1733  */
1734 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1735                 unsigned int gup_flags)
1736 {
1737         struct mm_struct *mm;
1738
1739         if (addr + len < addr)
1740                 return 0;
1741
1742         mm = get_task_mm(tsk);
1743         if (!mm)
1744                 return 0;
1745
1746         len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1747
1748         mmput(mm);
1749         return len;
1750 }
1751 EXPORT_SYMBOL_GPL(access_process_vm);
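/*
 * Hypothetical caller sketch (e.g. a ptrace-style peek); names and sizes
 * are illustrative only:
 *
 *	unsigned long word;
 *
 *	if (access_process_vm(child, addr, &word, sizeof(word),
 *			      FOLL_FORCE) != sizeof(word))
 *		return -EIO;
 */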
1752
1753 /**
1754  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1755  * @inode: The inode to check
1756  * @size: The current filesize of the inode
1757  * @newsize: The proposed filesize of the inode
1758  *
1759  * Check the shared mappings on an inode on behalf of a shrinking truncate to
1760  * make sure that any outstanding VMAs aren't broken, and then shrink the
1761  * vm_regions that extend beyond the new size so that do_mmap() doesn't
1762  * automatically grant mappings that are too large.
1763  */
1764 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1765                                 size_t newsize)
1766 {
1767         struct vm_area_struct *vma;
1768         struct vm_region *region;
1769         pgoff_t low, high;
1770         size_t r_size, r_top;
1771
1772         low = newsize >> PAGE_SHIFT;
1773         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1774
1775         down_write(&nommu_region_sem);
1776         i_mmap_lock_read(inode->i_mapping);
1777
1778         /* search for VMAs that fall within the dead zone */
1779         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1780                 /* found one - only interested if it's shared out of the page
1781                  * cache */
1782                 if (vma->vm_flags & VM_SHARED) {
1783                         i_mmap_unlock_read(inode->i_mapping);
1784                         up_write(&nommu_region_sem);
1785                         return -ETXTBSY; /* not quite true, but near enough */
1786                 }
1787         }
1788
1789         /* reduce any regions that overlap the dead zone - if in existence,
1790          * these will be pointed to by VMAs that don't overlap the dead zone
1791          *
1792          * we don't check for any regions that start beyond the EOF as there
1793          * shouldn't be any
1794          */
1795         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1796                 if (!(vma->vm_flags & VM_SHARED))
1797                         continue;
1798
1799                 region = vma->vm_region;
1800                 r_size = region->vm_top - region->vm_start;
1801                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1802
1803                 if (r_top > newsize) {
1804                         region->vm_top -= r_top - newsize;
1805                         if (region->vm_end > region->vm_top)
1806                                 region->vm_end = region->vm_top;
1807                 }
1808         }
1809
1810         i_mmap_unlock_read(inode->i_mapping);
1811         up_write(&nommu_region_sem);
1812         return 0;
1813 }
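/*
 * Worked example for the dead-zone bounds above (illustrative, 4 KiB
 * pages): truncating a 12288-byte file down to 5120 bytes gives
 * low = 5120 >> 12 = 1 and high = (12288 + 4095) >> 12 = 3, so only VMAs
 * overlapping file pages 1..3 are checked for VM_SHARED.
 */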
1814
1815 /*
1816  * Initialise sysctl_user_reserve_kbytes.
1817  *
1818  * This is intended to prevent a user's single memory-hogging process from
1819  * leaving the system in a state where the user cannot recover (kill the hog)
1820  * in OVERCOMMIT_NEVER mode.
1821  *
1822  * The default value is min(3% of free memory, 128MB)
1823  * 128MB is enough to recover with sshd/login, bash, and top/kill.
1824  */
1825 static int __meminit init_user_reserve(void)
1826 {
1827         unsigned long free_kbytes;
1828
1829         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1830
1831         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1832         return 0;
1833 }
1834 subsys_initcall(init_user_reserve);
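/*
 * Worked example (illustrative): free_kbytes / 32 is ~3.125%, which is the
 * "3% of free memory" quoted above, and 1UL << 17 KiB is 131072 KiB =
 * 128 MiB; a machine with 8 GiB free therefore gets the 128 MiB cap rather
 * than ~256 MiB.
 */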
1835
1836 /*
1837  * Initialise sysctl_admin_reserve_kbytes.
1838  *
1839  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1840  * to log in and kill a memory hogging process.
1841  *
1842  * Systems with more than 256MB will reserve 8MB, enough to recover
1843  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1844  * only reserve 3% of free pages by default.
1845  */
1846 static int __meminit init_admin_reserve(void)
1847 {
1848         unsigned long free_kbytes;
1849
1850         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1851
1852         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1853         return 0;
1854 }
1855 subsys_initcall(init_admin_reserve);
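/*
 * Worked example (illustrative): with exactly 256 MiB free,
 * free_kbytes / 32 = 8192 KiB, which matches the 1UL << 13 (8 MiB) cap, so
 * any system with more free memory than that reserves the fixed 8 MiB, as
 * the comment above describes.
 */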