// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}
static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
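/*
 * (Editorial note) The two helpers are deliberately symmetric:
 * take_rmap_locks() acquires the file rmap lock before the anon_vma
 * lock, and drop_rmap_locks() releases them in the reverse order.
 */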
static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set the soft dirty bit so we can notice
	 * in userspace that the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
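/*
 * (Editorial note) Userspace typically observes the soft-dirty bit set
 * above through the pagemap interface; see
 * Documentation/admin-guide/mm/soft-dirty.rst.
 */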
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}
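/*
 * (Editorial note) The force_flush path above flushes the range
 * [old_end - len, old_end): since old_addr has been advanced to old_end
 * by the loop, that is exactly the source range just moved, and it is
 * flushed before either page table lock is dropped.
 */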
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
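/*
 * (Editorial note, assuming 4kB pages and 2MB PMDs) A successful
 * move_normal_pmd() relocates 512 ptes at once by re-linking a single
 * page-table page under the pmd entry, instead of copying each pte
 * individually in move_ptes().
 */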
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif
#ifdef CONFIG_HAVE_MOVE_PUD
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
};
/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
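/*
 * (Editorial worked example, assuming 4kB pages and 2MB PMDs)
 * get_extent(NORMAL_PMD, old_addr = 0x1ff000, old_end = 0x600000,
 * new_addr = 0x400000): next = 0x200000, so extent = 0x1000 - the step
 * is clamped to the next PMD boundary of the source. A full PMD_SIZE
 * extent is returned only when both old_addr and new_addr are
 * PMD-aligned and at least PMD_SIZE of the range remains.
 */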
/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If the extent is PUD-sized, try to speed up the move by
		 * moving at the PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			pud_t *old_pud, *new_pud;

			old_pud = get_old_pud(vma->vm_mm, old_addr);
			if (!old_pud)
				continue;
			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
			if (!new_pud)
				break;
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, need_rmap_locks))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed up the move
			 * by moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}
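/*
 * (Editorial note) A return value smaller than len means the loop above
 * bailed out early (e.g. a page-table allocation failed); the caller,
 * move_vma(), compares the result against old_len and moves the ptes
 * back on a shortfall.
 */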
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
			  MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
			return -ENOMEM;
	}

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
			vm_unacct_memory(new_len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfn tracking that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/*
		 * anon_vma links of the old vma are no longer needed after its
		 * page table has been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(new_len >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;

		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;

		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute if the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst-case scenario is when both vmas (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leads us to 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be more safe.
	 */
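	/*
	 * (Editorial arithmetic, using the default sysctl_max_map_count of
	 * 65530) The check below refuses the move once mm->map_count
	 * reaches 65525, leaving room for the two potential splits plus
	 * the headroom move_vma() insists on.
	 */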
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);
	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for the DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages.
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;

		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
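/*
 * (Editorial usage sketch, not part of the original file) From userspace
 * the path above is reached via the mremap(2) wrapper, e.g.:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *
 * If the vma cannot be expanded in place, MREMAP_MAYMOVE lets the kernel
 * relocate it (the move_vma() path above), in which case q differs from
 * p. On failure mremap() returns MAP_FAILED and sets errno.
 */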