// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
#include "internal.h"

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}
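
/*
 * Note the difference between the two helpers above: migrate_vma_collect_skip()
 * records a zero src entry, meaning "nothing to migrate at this address", while
 * migrate_vma_collect_hole() records MIGRATE_PFN_MIGRATE without a pfn, meaning
 * the address is currently unbacked but the caller may still allocate a
 * destination page for it (see the migrate_vma_setup() documentation below).
 */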

static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;
	spinlock_t *ptl;
	pte_t *ptep;

again:
	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);

	if (pmd_trans_huge(*pmdp)) {
		struct page *page;

		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {
			spin_unlock(ptl);
			goto again;
		}

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			int ret;

			get_page(page);
			spin_unlock(ptl);
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
		}
	}

	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;
		struct page *page;
		swp_entry_t entry;
		pte_t pte;

		pte = *ptep;

		if (pte_none(pte)) {
			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
			}
			goto next;
		}

		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entries. Other special swap entries are
			 * not migratable, and we ignore regular swapped pages.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
		} else {
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn) &&
			    (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				migrate->cpages++;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			if (page && !is_zone_device_page(page) &&
			    !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			else if (page && is_device_coherent_page(page) &&
			    (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
			     page->pgmap->owner != migrate->pgmap_owner))
				goto next;
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		}

		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}

		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. A side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non-device pages (device pages are not on the lru and thus
		 * can't be dropped from it).
		 */
		get_page(page);

		/*
		 * We rely on trylock_page() to avoid deadlock between
		 * concurrent migrations where each is waiting on the other's
		 * page lock. If we can't immediately lock the page we fail this
		 * migration as it is only best effort anyway.
		 *
		 * If we can lock the page it's safe to set up a migration entry
		 * now. In the common case where the page is mapped once in a
		 * single process setting up the migration entry now is an
		 * optimisation to avoid walking the rmap later with
		 * try_to_migrate().
		 */
		if (trylock_page(page)) {
			bool anon_exclusive;
			pte_t swp_pte;

			flush_cache_page(vma, addr, pte_pfn(*ptep));
			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				pte = ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				pte = ptep_get_and_clear(mm, addr, ptep);
			}

			migrate->cpages++;

			/* Set the dirty flag on the folio now the pte is gone. */
			if (pte_dirty(pte))
				folio_mark_dirty(page_folio(page));

			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			if (pte_present(pte)) {
				if (pte_young(pte))
					entry = make_migration_entry_young(entry);
				if (pte_dirty(pte))
					entry = make_migration_entry_dirty(entry);
			}
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);

			/*
			 * This is like regular unmap: we remove the rmap and
			 * drop the page refcount. The page won't be freed, as
			 * we took a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	return 0;
}
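
/*
 * Note on the encoding used in the src/dst arrays above: migrate_pfn() packs a
 * pfn into the upper bits of an entry and sets MIGRATE_PFN_VALID, while
 * MIGRATE_PFN_MIGRATE and MIGRATE_PFN_WRITE are separate flag bits in the low
 * bits (see include/linux/migrate.h). migrate_pfn_to_page() performs the
 * reverse lookup and returns NULL for entries without MIGRATE_PFN_VALID.
 */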

static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
};

/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}

/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because the caller holds an extra reference, either
	 * from isolate_lru_page() for a regular page, or migrate_vma_collect()
	 * for a device page.
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex
	 * to check them than regular pages, because they can be mapped with a
	 * pmd or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}
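
/*
 * Informal worked example of the check above: an anonymous page that was
 * successfully unmapped by migrate_vma_unmap() has a zero mapcount and, at
 * this point, only the reference taken by isolate_lru_page(), so
 * page_count() - extra == 0 and the page is treated as unpinned. Any
 * additional reference, such as a GUP pin, makes the difference positive
 * and the page is reported as pinned.
 */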

/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check if it has been pinned. Pinned pages
 * are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy the contents of the original page over to the
 * new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i, restore = 0;
	bool allow_drain = true;

	lru_add_drain();

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page)
			continue;

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's pagevec */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (isolate_lru_page(page)) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				migrate->cpages--;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}

		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) || !migrate_vma_check_page(page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			migrate->cpages--;
			restore++;
			continue;
		}
	}

	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);

		migrate->src[i] = 0;
		folio_unlock(folio);
		folio_put(folio);
		restore--;
	}
}

/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a range of virtual addresses by collecting all the pages
 * backing each virtual address in the range, saving them inside the src array.
 * Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Pages that are pinned are then restored by
 * remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
 * migration from device memory to system memory. If the caller cannot migrate
 * a device page back to system memory, then it must return VM_FAULT_SIGBUS,
 * which has severe consequences for the userspace process, so it must be
 * avoided if at all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag in the corresponding src array
 * entry, thus allowing the caller to allocate device memory for those unbacked
 * virtual addresses. For this the caller simply has to allocate device memory
 * and properly set the destination entry like for regular migration. Note that
 * this can still fail, and thus inside the device driver you must check if the
 * migration was successful for those entries after calling
 * migrate_vma_pages(), just like for regular migration.
 *
 * After that, the caller must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, then migrate_vma_pages() migrates struct page information from the
 * source struct page to the destination struct page. If it fails to migrate
 * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
 * the src array.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if
 * it chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);

	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
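
/*
 * Illustrative sketch only (not called anywhere in this file and not part of
 * the exported API): one way a driver could drive the three phases documented
 * above to migrate the single anonymous page backing @addr. A real driver
 * would normally allocate destination memory from its own device allocator
 * and batch many pages per call; here alloc_page() stands in for that
 * allocation and most error handling is elided. The caller must hold the
 * mmap_lock of the target mm in read mode.
 */
static int __maybe_unused migrate_vma_example_one_page(struct vm_area_struct *vma,
							unsigned long addr)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= addr & PAGE_MASK,
		.end		= (addr & PAGE_MASK) + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= NULL,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	struct page *dpage, *spage;
	int ret;

	/* Phase 1: collect, lock and unmap the source page. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	/* The page may be pinned or otherwise not migratable. */
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	/* Allocate and lock a destination page, then fill it. */
	dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		spage = migrate_pfn_to_page(src_pfn);
		if (spage)
			copy_highpage(dpage, spage);
		else
			clear_highpage(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	/* Phase 2: move struct page metadata to the destination page. */
	migrate_vma_pages(&args);

	/* Phase 3: point the CPU page table at whichever page "won". */
	migrate_vma_finalize(&args);

	return (src_pfn & MIGRATE_PFN_MIGRATE) ? 0 : -EBUSY;
}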

/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     handle_pte_fault()
 *       do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private or coherent page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have mmap_read_lock(mm).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		if (is_zone_device_page(page) &&
		    !is_device_coherent_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(*ptep))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from the source struct page to the
 * destination struct page. This effectively finishes the migration from the
 * source page to the destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			/*
			 * The only time there is no vma is when called from
			 * migrate_device_coherent_page(). However this isn't
			 * called if the page could not be unmapped.
			 */
			VM_BUG_ON(!migrate->vma);
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init_owner(&range,
					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
					migrate->vma->vm_mm, addr, migrate->end,
					migrate->pgmap_owner);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_device_private_page(newpage) ||
		    is_device_coherent_page(newpage)) {
			/*
			 * For now only support anonymous memory migrating to
			 * device private or coherent memory.
			 */
			if (mapping) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		r = migrate_folio(mapping, page_folio(newpage),
				page_folio(page), MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the
 * extra refcount for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);
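
/*
 * To summarise the lifecycle across the three phases: source pages are locked
 * and isolated during migrate_vma_setup(), remain locked while the caller
 * allocates destination memory and copies data, and are only unlocked and put
 * back on the LRU (or have their reference dropped, for device pages) here in
 * migrate_vma_finalize(). Destination pages locked by the caller are unlocked
 * here as well.
 */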

/*
 * Migrate a device coherent page back to normal memory. The caller should have
 * a reference on the page which will be copied to the new page if migration is
 * successful, or dropped on failure.
 */
int migrate_device_coherent_page(struct page *page)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma args;
	struct page *dpage;

	WARN_ON_ONCE(PageCompound(page));

	lock_page(page);
	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
	args.src = &src_pfn;
	args.dst = &dst_pfn;
	args.cpages = 1;
	args.npages = 1;
	args.vma = NULL;

	/*
	 * We don't have a VMA and don't need to walk the page tables to find
	 * the source page. So call migrate_vma_unmap() directly to unmap the
	 * page as migrate_vma_setup() will fail if args.vma == NULL.
	 */
	migrate_vma_unmap(&args);
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return -EBUSY;

	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	if (dpage) {
		lock_page(dpage);
		dst_pfn = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);
	if (src_pfn & MIGRATE_PFN_MIGRATE)
		copy_highpage(dpage, page);
	migrate_vma_finalize(&args);

	if (src_pfn & MIGRATE_PFN_MIGRATE)
		return 0;
	return -EBUSY;
}