// SPDX-License-Identifier: GPL-2.0
/*
 * Device Memory Migration functionality.
 *
 * Originally written by Jérôme Glisse.
 */
#include <linux/export.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swapops.h>
#include <asm/tlbflush.h>
static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}
static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    __always_unused int depth,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	/* Only allow populating anonymous memory. */
	if (!vma_is_anonymous(walk->vma))
		return migrate_vma_collect_skip(start, end, walk);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
static int migrate_vma_collect_pmd(pmd_t *pmdp,
				   unsigned long start,
				   unsigned long end,
				   struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start, unmapped = 0;

	if (pmd_none(*pmdp))
		return migrate_vma_collect_hole(start, end, -1, walk);
	if (pmd_trans_huge(*pmdp)) {
		ptl = pmd_lock(mm, pmdp);
		if (unlikely(!pmd_trans_huge(*pmdp))) {

		page = pmd_page(*pmdp);
		if (is_huge_zero_page(page)) {
			split_huge_pmd(vma, pmdp, addr);
			if (pmd_trans_unstable(pmdp))
				return migrate_vma_collect_skip(start, end,
								walk);
		} else {
			if (unlikely(!trylock_page(page)))
				return migrate_vma_collect_skip(start, end,
								walk);
			ret = split_huge_page(page);
			if (ret)
				return migrate_vma_collect_skip(start, end,
								walk);
			if (pmd_none(*pmdp))
				return migrate_vma_collect_hole(start, end, -1,
								walk);
	if (unlikely(pmd_bad(*pmdp)))
		return migrate_vma_collect_skip(start, end, walk);

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	arch_enter_lazy_mmu_mode();

	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		unsigned long mpfn = 0, pfn;

			if (vma_is_anonymous(vma)) {
				mpfn = MIGRATE_PFN_MIGRATE;
		if (!pte_present(pte)) {
			/*
			 * Only care about unaddressable device page special
			 * page table entry. Other special swap entries are not
			 * migratable, and we ignore regular swapped pages.
			 */
			entry = pte_to_swp_entry(pte);
			if (!is_device_private_entry(entry))
				goto next;

			page = pfn_swap_entry_to_page(entry);
			if (!(migrate->flags &
				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
			    page->pgmap->owner != migrate->pgmap_owner)
				goto next;

			mpfn = migrate_pfn(page_to_pfn(page)) |
					MIGRATE_PFN_MIGRATE;
			if (is_writable_device_private_entry(entry))
				mpfn |= MIGRATE_PFN_WRITE;
			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
				goto next;
			pfn = pte_pfn(pte);
			if (is_zero_pfn(pfn)) {
				mpfn = MIGRATE_PFN_MIGRATE;
				goto next;
			}
			page = vm_normal_page(migrate->vma, addr, pte);
			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
		/* FIXME support THP */
		if (!page || !page->mapping || PageTransCompound(page)) {
			mpfn = 0;
			goto next;
		}
		/*
		 * By getting a reference on the page we pin it and that blocks
		 * any kind of migration. Side effect is that it "freezes" the
		 * pte.
		 *
		 * We drop this reference after isolating the page from the lru
		 * for non-device pages (device pages are not on the lru and thus
		 * can't be dropped from it).
		 */
		get_page(page);

		/*
		 * Optimize for the common case where page is only mapped once
		 * in one process. If we can lock the page, then we can safely
		 * set up a special migration page table entry now.
		 */
		if (trylock_page(page)) {
			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
			if (anon_exclusive) {
				flush_cache_page(vma, addr, pte_pfn(*ptep));
				ptep_clear_flush(vma, addr, ptep);

				if (page_try_share_anon_rmap(page)) {
					set_pte_at(mm, addr, ptep, pte);
					unlock_page(page);
					put_page(page);
					mpfn = 0;
					goto next;
				}
			} else {
				ptep_get_and_clear(mm, addr, ptep);
			}
			/* Setup special migration page table entry */
			if (mpfn & MIGRATE_PFN_WRITE)
				entry = make_writable_migration_entry(
							page_to_pfn(page));
			else if (anon_exclusive)
				entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page));
			else
				entry = make_readable_migration_entry(
							page_to_pfn(page));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_present(pte)) {
				if (pte_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			} else {
				if (pte_swp_soft_dirty(pte))
					swp_pte = pte_swp_mksoft_dirty(swp_pte);
				if (pte_swp_uffd_wp(pte))
					swp_pte = pte_swp_mkuffd_wp(swp_pte);
			}
			set_pte_at(mm, addr, ptep, swp_pte);
			/*
			 * This is like regular unmap: we remove the rmap and
			 * drop page refcount. Page won't be freed, as we took
			 * a reference just above.
			 */
			page_remove_rmap(page, vma, false);
			put_page(page);

			if (pte_present(pte))
				unmapped++;
		} else {
			put_page(page);
			mpfn = 0;
		}

next:
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = mpfn;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(ptep - 1, ptl);

	/* Only flush the TLB if we actually modified any entries */
	if (unmapped)
		flush_tlb_range(walk->vma, start, end);

	return 0;
}
static const struct mm_walk_ops migrate_vma_walk_ops = {
	.pmd_entry		= migrate_vma_collect_pmd,
	.pte_hole		= migrate_vma_collect_hole,
};
/*
 * migrate_vma_collect() - collect pages over a range of virtual addresses
 * @migrate: migrate struct containing all migration information
 *
 * This will walk the CPU page table. For each virtual address backed by a
 * valid page, it updates the src array and takes a reference on the page, in
 * order to pin the page until we lock it and unmap it.
 */
static void migrate_vma_collect(struct migrate_vma *migrate)
{
	struct mmu_notifier_range range;

	/*
	 * Note that the pgmap_owner is passed to the mmu notifier callback so
	 * that the registered device driver can skip invalidating device
	 * private page mappings that won't be migrated.
	 */
	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
		migrate->pgmap_owner);
	mmu_notifier_invalidate_range_start(&range);

	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
			&migrate_vma_walk_ops, migrate);

	mmu_notifier_invalidate_range_end(&range);
	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}
/*
 * migrate_vma_check_page() - check if page is pinned or not
 * @page: struct page to check
 *
 * Pinned pages cannot be migrated. This is the same test as in
 * folio_migrate_mapping(), except that here we allow migration of a
 * ZONE_DEVICE page.
 */
static bool migrate_vma_check_page(struct page *page)
{
	/*
	 * One extra ref because the caller holds an extra reference, either from
	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
	 * a device page.
	 */
	int extra = 1;

	/*
	 * FIXME support THP (transparent huge page), it is a bit more complex to
	 * check them than regular pages, because they can be mapped with a pmd
	 * or with a pte (split pte mapping).
	 */
	if (PageCompound(page))
		return false;

	/* Pages from ZONE_DEVICE have one extra reference */
	if (is_zone_device_page(page))
		extra++;

	/* For file-backed pages */
	if (page_mapping(page))
		extra += 1 + page_has_private(page);

	if ((page_count(page) - extra) > page_mapcount(page))
		return false;

	return true;
}
/*
 * migrate_vma_unmap() - replace page mapping with special migration pte entry
 * @migrate: migrate struct containing all migration information
 *
 * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
 * special migration pte entry and check whether each page is pinned. Pinned
 * pages are restored because we cannot migrate them.
 *
 * This is the last step before we call the device driver callback to allocate
 * destination memory and copy the contents of the original page over to the
 * new page.
 */
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i, restore = 0;
	bool allow_drain = true;

	for (i = 0; i < npages; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		/* ZONE_DEVICE pages are not on LRU */
		if (!is_zone_device_page(page)) {
			if (!PageLRU(page) && allow_drain) {
				/* Drain CPU's pagevec */
				lru_add_drain_all();
				allow_drain = false;
			}

			if (isolate_lru_page(page)) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				restore++;
				continue;
			}

			/* Drop the reference we took in collect */
			put_page(page);
		}
		folio = page_folio(page);
		if (folio_mapped(folio))
			try_to_migrate(folio, 0);

		if (page_mapped(page) || !migrate_vma_check_page(page)) {
			if (!is_zone_device_page(page)) {
				get_page(page);
				putback_lru_page(page);
			}

			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			restore++;
			continue;
		}
	}
	for (i = 0; i < npages && restore; i++) {
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct folio *folio;

		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		folio = page_folio(page);
		remove_migration_ptes(folio, folio, false);
/**
 * migrate_vma_setup() - prepare to migrate a range of memory
 * @args: contains the vma, start, and pfns arrays for the migration
 *
 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
 * without an error.
 *
 * Prepare to migrate a range of virtual addresses by collecting all the pages
 * backing each virtual address in the range, saving them inside the src array.
 * Then lock those pages and unmap them. Once the pages are locked and
 * unmapped, check whether each page is pinned or not. Pages that aren't
 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
 * corresponding src array entry. Pages that are pinned are then restored, by
 * remapping and unlocking them.
 *
 * The caller should then allocate destination memory and copy source memory to
 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
 * flag set). Once these are allocated and copied, the caller must update each
 * corresponding entry in the dst array with the pfn value of the destination
 * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
 * lock_page().
 *
 * Note that the caller does not have to migrate all the pages that are marked
 * with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a migration
 * from device memory to system memory. If the caller cannot migrate a device
 * page back to system memory, then it must return VM_FAULT_SIGBUS, which has
 * severe consequences for the userspace process, so it must be avoided if at
 * all possible.
 *
 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source
 * array, thus allowing the caller to allocate device memory for those unbacked
 * virtual addresses. For this the caller simply has to allocate device memory
 * and properly set the destination entry like for regular migration. Note that
 * this can still fail, and thus inside the device driver you must check if the
 * migration was successful for those entries after calling migrate_vma_pages(),
 * just like for regular migration.
 *
 * After that, the callers must call migrate_vma_pages() to go over each entry
 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
 * flag set, then migrate_vma_pages() migrates the struct page information from
 * the source struct page to the destination struct page. If it fails to migrate
 * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
 * the src array entry.
 *
 * At this point all successfully migrated pages have an entry in the src
 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
 * array entry with MIGRATE_PFN_VALID flag set.
 *
 * Once migrate_vma_pages() returns the caller may inspect which pages were
 * successfully migrated, and which were not. Successfully migrated pages will
 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table after migrate_vma_pages() because
 * both destination and source page are still locked, and the mmap_lock is held
 * in read mode (hence no one can unmap the range being migrated).
 *
 * Once the caller is done cleaning up things and updating its page table (if it
 * chose to do so, this is not an obligation) it finally calls
 * migrate_vma_finalize() to update the CPU page table to point to new pages
 * for successfully migrated pages or otherwise restore the CPU page table to
 * point to the original source pages.
 */
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);

	migrate_vma_collect(args);

	migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
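
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): roughly how a device driver might drive the migrate_vma_setup() /
 * migrate_vma_pages() / migrate_vma_finalize() sequence documented above to
 * move a small range of anonymous memory into device memory. The my_dev_*()
 * helpers are hypothetical driver callbacks; only the migrate_vma_* calls,
 * the MIGRATE_PFN_*/MIGRATE_VMA_SELECT_* flags and the migrate_pfn()/
 * migrate_pfn_to_page() helpers are the real API. The caller is assumed to
 * hold mmap_read_lock() on the target mm.
 *
 *	static int my_dev_migrate_range_to_device(struct vm_area_struct *vma,
 *						  unsigned long start,
 *						  unsigned long end,
 *						  void *pgmap_owner)
 *	{
 *		unsigned long src[32], dst[32];		// assume <= 32 pages here
 *		struct migrate_vma args = {
 *			.vma		= vma,
 *			.start		= start,
 *			.end		= end,
 *			.src		= src,
 *			.dst		= dst,
 *			.pgmap_owner	= pgmap_owner,
 *			.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *		};
 *		unsigned long i;
 *		int ret;
 *
 *		ret = migrate_vma_setup(&args);
 *		if (ret)
 *			return ret;
 *
 *		// Allocate and fill a destination page for every entry that can
 *		// be migrated; skip the ones migrate_vma_setup() rejected.
 *		for (i = 0; i < args.npages; i++) {
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *			dpage = my_dev_alloc_page();		// hypothetical
 *			if (!dpage)
 *				continue;
 *			lock_page(dpage);
 *			if (src[i] & MIGRATE_PFN_VALID)		// backed by a CPU page
 *				my_dev_copy_to_device(dpage,	// hypothetical
 *					migrate_pfn_to_page(src[i]));
 *			else
 *				my_dev_clear_page(dpage);	// pte_none() hole
 *			dst[i] = migrate_pfn(page_to_pfn(dpage));
 *		}
 *
 *		migrate_vma_pages(&args);
 *		// Entries still carrying MIGRATE_PFN_MIGRATE in src[] were
 *		// migrated; the driver may now update its own page tables.
 *		migrate_vma_finalize(&args);
 *		return 0;
 *	}
 */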
/*
 * This code closely matches the code in:
 *   __handle_mm_fault()
 *     do_anonymous_page()
 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
 * private page.
 */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	bool flush = false;
	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	pmdp = pmd_alloc(mm, pudp, addr);

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;
	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have mmap_read_lock(mm).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;
	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);
	if (is_device_private_page(page)) {
		swp_entry_t swp_entry;

		if (vma->vm_flags & VM_WRITE)
			swp_entry = make_writable_device_private_entry(
						page_to_pfn(page));
		else
			swp_entry = make_readable_device_private_entry(
						page_to_pfn(page));
		entry = swp_entry_to_pte(swp_entry);
	} else {
		/*
		 * For now we only support migrating to un-addressable device
		 * memory.
		 */
		if (is_zone_device_page(page)) {
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}
	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (check_stable_address_space(mm))
		goto unlock_abort;

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn))
			goto unlock_abort;
		flush = true;
	} else if (!pte_none(*ptep))
		goto unlock_abort;

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma))
		goto unlock_abort;
	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr);
	if (!is_zone_device_page(page))
		lru_cache_add_inactive_or_unevictable(page, vma);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}
	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

unlock_abort:
	pte_unmap_unlock(ptep, ptl);
abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}
/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from the source struct page to the
 * destination struct page. This effectively finishes the migration from the
 * source page to the destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;

			mmu_notifier_range_init_owner(&range,
				MMU_NOTIFY_MIGRATE, 0, migrate->vma,
				migrate->vma->vm_mm, addr, migrate->end,
				migrate->pgmap_owner);
			mmu_notifier_invalidate_range_start(&range);

			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i]);
			continue;
		}
		mapping = page_mapping(page);

		if (is_device_private_page(newpage)) {
			/*
			 * For now we only support private anonymous memory when
			 * migrating to un-addressable device memory.
			 */
			if (mapping) {
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		} else if (is_zone_device_page(newpage)) {
			/*
			 * Other types of ZONE_DEVICE page are not supported.
			 */
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}
		r = migrate_folio(mapping, page_folio(newpage),
				page_folio(page), MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);
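
/*
 * Editorial note (illustrative sketch, not part of the original file): after
 * migrate_vma_pages() returns, a driver should only commit device-side
 * mappings for entries whose src element still has MIGRATE_PFN_MIGRATE set,
 * since migrate_vma_pages() clears that flag for every page it failed to
 * migrate. Both source and destination pages remain locked until
 * migrate_vma_finalize(), so this check and update is safe against a
 * concurrent unmap of the range. The my_dev_map_page() helper below is
 * hypothetical driver code:
 *
 *	for (i = 0; i < args.npages; i++) {
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;	// not migrated, leave old mapping alone
 *		my_dev_map_page(args.start + (i << PAGE_SHIFT),
 *				migrate_pfn_to_page(args.dst[i]));
 *	}
 */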
/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and either puts them back on the lru or, for
 * device pages, drops the extra refcount.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct folio *dst, *src;
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}
		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		src = page_folio(page);
		dst = page_folio(newpage);
		remove_migration_ptes(src, dst, false);
		folio_unlock(src);
		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);
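
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the opposite direction, migrating one device-private page back to
 * system memory, typically from a dev_pagemap_ops->migrate_to_ram() fault
 * handler. The my_dev_*() names are hypothetical driver code; the rest is the
 * real API of this kernel generation. The mmap_lock is already held in read
 * mode in the fault path.
 *
 *	static vm_fault_t my_dev_migrate_to_ram(struct vm_fault *vmf)
 *	{
 *		unsigned long src = 0, dst = 0;
 *		struct migrate_vma args = {
 *			.vma		= vmf->vma,
 *			.start		= vmf->address & PAGE_MASK,
 *			.end		= (vmf->address & PAGE_MASK) + PAGE_SIZE,
 *			.src		= &src,
 *			.dst		= &dst,
 *			.pgmap_owner	= my_dev_pgmap_owner,	// hypothetical
 *			.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
 *		};
 *		vm_fault_t ret = 0;
 *
 *		if (migrate_vma_setup(&args))
 *			return VM_FAULT_SIGBUS;
 *
 *		if (src & MIGRATE_PFN_MIGRATE) {
 *			struct page *dpage;
 *
 *			dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vmf->vma,
 *					       vmf->address);
 *			if (dpage) {
 *				lock_page(dpage);
 *				my_dev_copy_from_device(dpage,	// hypothetical
 *					migrate_pfn_to_page(src));
 *				dst = migrate_pfn(page_to_pfn(dpage));
 *			} else {
 *				// Failing to migrate back is fatal for the
 *				// faulting process (see migrate_vma_setup()).
 *				ret = VM_FAULT_SIGBUS;
 *			}
 *		}
 *
 *		migrate_vma_pages(&args);
 *		migrate_vma_finalize(&args);
 *		return ret;
 *	}
 */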