// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
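
/*
 * Back-ends for the userfaultfd UFFDIO_COPY, UFFDIO_ZEROPAGE,
 * UFFDIO_CONTINUE and UFFDIO_WRITEPROTECT ioctls: resolve userfaults by
 * atomically filling in or write-protecting pages of the registered
 * range while the faulting threads wait.
 */
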
static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
		return NULL;

	/*
	 * Check the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	/* A page-cache page mapped into a private VMA must stay read-only. */
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider the case where both
	 * MISSING and WP modes are registered; we first wr-protect a none
	 * pte which has no page cache page backing it, then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
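
/*
 * UFFDIO_COPY path that generates anonymous memory: allocate a folio,
 * copy the source bytes into it under the mmap_lock with page faults
 * disabled, then map it at dst_addr.  A failed atomic copy is reported
 * as -ENOENT so the caller can retry the copy outside the mmap_lock.
 */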
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	ret = -ENOMEM;
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
				dst_addr, false);
	if (!folio)
		goto out;

	kaddr = kmap_local_folio(folio, 0);
	/*
	 * The read mmap_lock is held here.  Despite the mmap_lock being
	 * read-recursive, a deadlock is still possible if a writer has
	 * taken a lock.  For example:
	 *
	 * process A thread 1 takes read lock on own mmap_lock
	 * process A thread 2 calls mmap, blocks taking write lock
	 * process B thread 1 takes page fault, read lock on own mmap lock
	 * process B thread 2 calls mmap, blocks taking write lock
	 * process A thread 1 blocks taking read lock on process B
	 * process B thread 1 blocks taking read lock on process A
	 *
	 * Disable page faults to prevent potential deadlock
	 * and retry the copy outside the mmap_lock.
	 */
	pagefault_disable();
	ret = copy_from_user(kaddr, (const void __user *) src_addr,
			     PAGE_SIZE);
	pagefault_enable();
	kunmap_local(kaddr);

	/* fallback to copy_from_user outside mmap_lock */
	if (unlikely(ret)) {
		ret = -ENOENT;
		*foliop = folio;
		/* don't free the page */
		goto out;
	}

	flush_dcache_folio(folio);

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}
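
/*
 * UFFDIO_ZEROPAGE path for private anonymous memory: map the global
 * zero page read-only at dst_addr instead of allocating a new folio.
 */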
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
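
/*
 * Walk (and, if necessary, allocate) the page-table levels above the
 * PMD for @address, returning the PMD to fill or NULL on allocation
 * failure.
 */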
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we may not have run this because the pmd was missing;
	 * *pmd may already be established, and it may even be a
	 * trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with the mmap_lock held; it will release the mmap_lock before
 * returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist, as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			/* Look the vma up again on retry; see the comment above. */
			dst_vma = NULL;
			goto retry;
		}

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */
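
/*
 * Fill a single PTE: dispatch to the CONTINUE, COPY or ZEROPAGE helper
 * based on the request mode and on whether the destination VMA is
 * anonymous or shmem-backed.
 */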
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}
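
/*
 * Common implementation of UFFDIO_COPY, UFFDIO_ZEROPAGE and
 * UFFDIO_CONTINUE: validate the destination range, then fill it one
 * page at a time, dropping and re-taking the mmap_lock whenever the
 * source data has to be copied in with page faults enabled.
 */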
static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    atomic_t *mmap_changing,
					    uffd_flags_t flags)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(dst_vma, dst_start,
					    src_start, len, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		}

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
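
/*
 * Entry points called from the userfaultfd ioctl handlers; each one
 * only tags the request with the matching MFILL_ATOMIC_* mode.
 */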
ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  atomic_t *mmap_changing, uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}

ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing,
			      uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}
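
/*
 * Toggle uffd write-protection on [start, start + len) of a single VMA.
 * The caller holds the mmap_lock and has already validated the range.
 */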
long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected as default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}
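
/*
 * UFFDIO_WRITEPROTECT implementation: walk every VMA overlapping
 * [start, start + len) and apply or clear uffd-wp on those registered
 * for write-protect mode.
 */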
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failures */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}