1 // SPDX-License-Identifier: GPL-2.0
3 * Memory Migration functionality - linux/mm/migrate.c
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pfn_t.h>
42 #include <linux/memremap.h>
43 #include <linux/userfaultfd_k.h>
44 #include <linux/balloon_compaction.h>
45 #include <linux/page_idle.h>
46 #include <linux/page_owner.h>
47 #include <linux/sched/mm.h>
48 #include <linux/ptrace.h>
49 #include <linux/oom.h>
50 #include <linux/memory.h>
51 #include <linux/random.h>
52 #include <linux/sched/sysctl.h>
53 #include <linux/memory-tiers.h>
55 #include <asm/tlbflush.h>
57 #include <trace/events/migrate.h>
61 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
63 struct folio *folio = folio_get_nontail_page(page);
64 const struct movable_operations *mops;
67 * Avoid burning cycles with pages that are still under __free_pages(),
68 * or that just got freed under us.
70 * In case we 'win' a race for a movable page that is being freed under us
71 * and raise its refcount, preventing __free_pages() from doing its job,
72 * the put_page() at the end of this block will take care of
73 * releasing this page, thus avoiding a nasty leakage.
78 if (unlikely(folio_test_slab(folio)))
80 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
83 * Check the movable flag before taking the page lock because
84 * we use non-atomic bitops on newly allocated page flags, so
85 * unconditionally grabbing the lock would ruin the page owner's use of them.
87 if (unlikely(!__folio_test_movable(folio)))
89 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
91 if (unlikely(folio_test_slab(folio)))
95 * As movable pages are not isolated from LRU lists, concurrent
96 * compaction threads can race against page migration functions
97 * as well as race against the release of a page.
99 * In order to avoid having an already isolated movable page
100 * being (wrongly) re-isolated while it is under migration,
101 * or to avoid attempting to isolate pages being released,
102 * let's be sure we have the page lock
103 * before proceeding with the movable page isolation steps.
105 if (unlikely(!folio_trylock(folio)))
108 if (!folio_test_movable(folio) || folio_test_isolated(folio))
109 goto out_no_isolated;
111 mops = folio_movable_ops(folio);
112 VM_BUG_ON_FOLIO(!mops, folio);
114 if (!mops->isolate_page(&folio->page, mode))
115 goto out_no_isolated;
117 /* Driver shouldn't use PG_isolated bit of page->flags */
118 WARN_ON_ONCE(folio_test_isolated(folio));
119 folio_set_isolated(folio);
132 static void putback_movable_folio(struct folio *folio)
134 const struct movable_operations *mops = folio_movable_ops(folio);
136 mops->putback_page(&folio->page);
137 folio_clear_isolated(folio);
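/*
 * Hedged sketch of the driver side (names are hypothetical, and the
 * __SetPageMovable() registration helper is an assumption of this example):
 * a driver that wants its non-LRU pages handled by isolate_movable_page()
 * and putback_movable_folio() supplies a movable_operations table and marks
 * each page it owns as movable:
 *
 *	static const struct movable_operations demo_mops = {
 *		.isolate_page	= demo_isolate_page,
 *		.migrate_page	= demo_migrate_page,
 *		.putback_page	= demo_putback_page,
 *	};
 *
 *	// after allocating a page the driver owns:
 *	__SetPageMovable(page, &demo_mops);
 */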
141 * Put previously isolated pages back onto the appropriate lists
142 * from where they were once taken off for compaction/migration.
144 * This function shall be used whenever the isolated pageset has been
145 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
146 * and isolate_hugetlb().
148 void putback_movable_pages(struct list_head *l)
151 struct folio *folio2;
153 list_for_each_entry_safe(folio, folio2, l, lru) {
154 if (unlikely(folio_test_hugetlb(folio))) {
155 folio_putback_active_hugetlb(folio);
158 list_del(&folio->lru);
160 * We isolated a non-LRU movable folio, so here we can use
161 * __folio_test_movable() because an LRU folio's mapping cannot have
162 * PAGE_MAPPING_MOVABLE set.
164 if (unlikely(__folio_test_movable(folio))) {
165 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
167 if (folio_test_movable(folio))
168 putback_movable_folio(folio);
170 folio_clear_isolated(folio);
174 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
175 folio_is_file_lru(folio), -folio_nr_pages(folio));
176 folio_putback_lru(folio);
182 * Restore a potential migration pte to a working pte entry
184 static bool remove_migration_pte(struct folio *folio,
185 struct vm_area_struct *vma, unsigned long addr, void *old)
187 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
189 while (page_vma_mapped_walk(&pvmw)) {
190 rmap_t rmap_flags = RMAP_NONE;
194 unsigned long idx = 0;
196 /* pgoff is invalid for ksm pages, but they are never large */
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 new = folio_page(folio, idx);
201 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 /* PMD-mapped THP migration entry */
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
206 remove_migration_pmd(&pvmw, new);
212 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 if (pte_swp_soft_dirty(*pvmw.pte))
214 pte = pte_mksoft_dirty(pte);
216 entry = pte_to_swp_entry(*pvmw.pte);
217 if (!is_migration_entry_young(entry))
218 pte = pte_mkold(pte);
219 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
220 pte = pte_mkdirty(pte);
221 if (is_writable_migration_entry(entry))
222 pte = pte_mkwrite(pte);
223 else if (pte_swp_uffd_wp(*pvmw.pte))
224 pte = pte_mkuffd_wp(pte);
226 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
227 rmap_flags |= RMAP_EXCLUSIVE;
229 if (unlikely(is_device_private_page(new))) {
231 entry = make_writable_device_private_entry(
234 entry = make_readable_device_private_entry(
236 pte = swp_entry_to_pte(entry);
237 if (pte_swp_soft_dirty(*pvmw.pte))
238 pte = pte_swp_mksoft_dirty(pte);
239 if (pte_swp_uffd_wp(*pvmw.pte))
240 pte = pte_swp_mkuffd_wp(pte);
243 #ifdef CONFIG_HUGETLB_PAGE
244 if (folio_test_hugetlb(folio)) {
245 unsigned int shift = huge_page_shift(hstate_vma(vma));
247 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
248 if (folio_test_anon(folio))
249 hugepage_add_anon_rmap(new, vma, pvmw.address,
252 page_dup_file_rmap(new, true);
253 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
257 if (folio_test_anon(folio))
258 page_add_anon_rmap(new, vma, pvmw.address,
261 page_add_file_rmap(new, vma, false);
262 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
264 if (vma->vm_flags & VM_LOCKED)
267 trace_remove_migration_pte(pvmw.address, pte_val(pte),
268 compound_order(new));
270 /* No need to invalidate - it was non-present before */
271 update_mmu_cache(vma, pvmw.address, pvmw.pte);
278 * Get rid of all migration entries and replace them by
279 * references to the indicated page.
281 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
283 struct rmap_walk_control rwc = {
284 .rmap_one = remove_migration_pte,
289 rmap_walk_locked(dst, &rwc);
291 rmap_walk(dst, &rwc);
295 * Something used the pte of a page under migration. We need to
296 * get to the page and wait until migration is finished.
297 * When we return from this function the fault will be retried.
299 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
300 unsigned long address)
307 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
314 if (!is_swap_pte(pte))
317 entry = pte_to_swp_entry(pte);
318 if (!is_migration_entry(entry))
321 migration_entry_wait_on_locked(entry, ptl);
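/*
 * Hedged caller sketch: the fault handler (roughly, do_swap_page()) finds a
 * migration entry in the PTE and ends up here before retrying the fault:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 */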
327 #ifdef CONFIG_HUGETLB_PAGE
329 * The vma read lock must be held upon entry. Holding that lock prevents either
330 * the pte or the ptl from being freed.
332 * This function will release the vma lock before returning.
334 void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
336 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
339 hugetlb_vma_assert_locked(vma);
341 pte = huge_ptep_get(ptep);
343 if (unlikely(!is_hugetlb_entry_migration(pte))) {
345 hugetlb_vma_unlock_read(vma);
348 * If a migration entry existed, it is safe to release the vma lock
349 * here because the pgtable page won't be freed until the
350 * pgtable lock is released. See the comment right above the pgtable
351 * lock release in migration_entry_wait_on_locked().
353 hugetlb_vma_unlock_read(vma);
354 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
359 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
360 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
364 ptl = pmd_lock(mm, pmd);
365 if (!is_pmd_migration_entry(*pmd))
367 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
374 static int folio_expected_refs(struct address_space *mapping,
381 refs += folio_nr_pages(folio);
382 if (folio_test_private(folio))
389 * Replace the page in the mapping.
391 * The number of remaining references must be:
392 * 1 for anonymous pages without a mapping
393 * 2 for pages with a mapping
394 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
396 int folio_migrate_mapping(struct address_space *mapping,
397 struct folio *newfolio, struct folio *folio, int extra_count)
399 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
400 struct zone *oldzone, *newzone;
402 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
403 long nr = folio_nr_pages(folio);
406 /* Anonymous page without mapping */
407 if (folio_ref_count(folio) != expected_count)
410 /* No turning back from here */
411 newfolio->index = folio->index;
412 newfolio->mapping = folio->mapping;
413 if (folio_test_swapbacked(folio))
414 __folio_set_swapbacked(newfolio);
416 return MIGRATEPAGE_SUCCESS;
419 oldzone = folio_zone(folio);
420 newzone = folio_zone(newfolio);
423 if (!folio_ref_freeze(folio, expected_count)) {
424 xas_unlock_irq(&xas);
429 * Now we know that no one else is looking at the folio:
430 * no turning back from here.
432 newfolio->index = folio->index;
433 newfolio->mapping = folio->mapping;
434 folio_ref_add(newfolio, nr); /* add cache reference */
435 if (folio_test_swapbacked(folio)) {
436 __folio_set_swapbacked(newfolio);
437 if (folio_test_swapcache(folio)) {
438 folio_set_swapcache(newfolio);
439 newfolio->private = folio_get_private(folio);
442 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
445 /* Move dirty while page refs frozen and newpage not yet exposed */
446 dirty = folio_test_dirty(folio);
448 folio_clear_dirty(folio);
449 folio_set_dirty(newfolio);
452 xas_store(&xas, newfolio);
455 * Drop cache reference from old page by unfreezing
456 * to one less reference.
457 * We know this isn't the last reference.
459 folio_ref_unfreeze(folio, expected_count - nr);
462 /* Leave irq disabled to prevent preemption while updating stats */
465 * If moved to a different zone then also account
466 * the page for that zone. Other VM counters will be
467 * taken care of when we establish references to the
468 * new page and drop references to the old page.
470 * Note that anonymous pages are accounted for
471 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
472 * are mapped to swap space.
474 if (newzone != oldzone) {
475 struct lruvec *old_lruvec, *new_lruvec;
476 struct mem_cgroup *memcg;
478 memcg = folio_memcg(folio);
479 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
480 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
482 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
483 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
484 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
485 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
486 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
489 if (folio_test_swapcache(folio)) {
490 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
491 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
494 if (dirty && mapping_can_writeback(mapping)) {
495 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
496 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
497 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
498 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
503 return MIGRATEPAGE_SUCCESS;
505 EXPORT_SYMBOL(folio_migrate_mapping);
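/*
 * Worked example of the reference rule above, for an order-0 folio and
 * extra_count == 0: an anonymous folio with no mapping must have exactly 1
 * reference (the isolation reference), a pagecache folio 2 (isolation +
 * mapping), and a pagecache folio with private data 3. For a large folio the
 * mapping contributes folio_nr_pages() references instead of 1. Any
 * additional reference (e.g. a GUP pin) makes the count check or
 * folio_ref_freeze() fail, and the migration attempt is retried later.
 */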
508 * The expected number of remaining references is the same as that
509 * of folio_migrate_mapping().
511 int migrate_huge_page_move_mapping(struct address_space *mapping,
512 struct folio *dst, struct folio *src)
514 XA_STATE(xas, &mapping->i_pages, folio_index(src));
518 expected_count = 2 + folio_has_private(src);
519 if (!folio_ref_freeze(src, expected_count)) {
520 xas_unlock_irq(&xas);
524 dst->index = src->index;
525 dst->mapping = src->mapping;
529 xas_store(&xas, dst);
531 folio_ref_unfreeze(src, expected_count - 1);
533 xas_unlock_irq(&xas);
535 return MIGRATEPAGE_SUCCESS;
539 * Copy the flags and some other ancillary information
541 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
545 if (folio_test_error(folio))
546 folio_set_error(newfolio);
547 if (folio_test_referenced(folio))
548 folio_set_referenced(newfolio);
549 if (folio_test_uptodate(folio))
550 folio_mark_uptodate(newfolio);
551 if (folio_test_clear_active(folio)) {
552 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
553 folio_set_active(newfolio);
554 } else if (folio_test_clear_unevictable(folio))
555 folio_set_unevictable(newfolio);
556 if (folio_test_workingset(folio))
557 folio_set_workingset(newfolio);
558 if (folio_test_checked(folio))
559 folio_set_checked(newfolio);
561 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
562 * migration entries. We can still have PG_anon_exclusive set on an
563 * effectively unmapped and unreferenced first sub-page of an
564 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
566 if (folio_test_mappedtodisk(folio))
567 folio_set_mappedtodisk(newfolio);
569 /* Move dirty on pages not done by folio_migrate_mapping() */
570 if (folio_test_dirty(folio))
571 folio_set_dirty(newfolio);
573 if (folio_test_young(folio))
574 folio_set_young(newfolio);
575 if (folio_test_idle(folio))
576 folio_set_idle(newfolio);
579 * Copy NUMA information to the new page, to prevent over-eager
580 * future migrations of this same page.
582 cpupid = page_cpupid_xchg_last(&folio->page, -1);
584 * In memory tiering mode, when migrating between a slow and a fast
585 * memory node, reset the cpupid, because it is used to record the
586 * page access time in the slow memory node.
588 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
589 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
590 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
592 if (f_toptier != t_toptier)
595 page_cpupid_xchg_last(&newfolio->page, cpupid);
597 folio_migrate_ksm(newfolio, folio);
599 * Please do not reorder this without considering how mm/ksm.c's
600 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
602 if (folio_test_swapcache(folio))
603 folio_clear_swapcache(folio);
604 folio_clear_private(folio);
606 /* page->private contains hugetlb specific flags */
607 if (!folio_test_hugetlb(folio))
608 folio->private = NULL;
611 * If any waiters have accumulated on the new page then wake them up.
614 if (folio_test_writeback(newfolio))
615 folio_end_writeback(newfolio);
618 * PG_readahead shares the same bit with PG_reclaim. The above
619 * end_page_writeback() may clear PG_readahead mistakenly, so set the bit after this.
622 if (folio_test_readahead(folio))
623 folio_set_readahead(newfolio);
625 folio_copy_owner(newfolio, folio);
627 if (!folio_test_hugetlb(folio))
628 mem_cgroup_migrate(folio, newfolio);
630 EXPORT_SYMBOL(folio_migrate_flags);
632 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
634 folio_copy(newfolio, folio);
635 folio_migrate_flags(newfolio, folio);
637 EXPORT_SYMBOL(folio_migrate_copy);
639 /************************************************************
640 * Migration functions
641 ***********************************************************/
643 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
644 struct folio *src, enum migrate_mode mode, int extra_count)
648 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
650 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
652 if (rc != MIGRATEPAGE_SUCCESS)
655 if (mode != MIGRATE_SYNC_NO_COPY)
656 folio_migrate_copy(dst, src);
658 folio_migrate_flags(dst, src);
659 return MIGRATEPAGE_SUCCESS;
663 * migrate_folio() - Simple folio migration.
664 * @mapping: The address_space containing the folio.
665 * @dst: The folio to migrate the data to.
666 * @src: The folio containing the current data.
667 * @mode: How to migrate the page.
669 * Common logic to directly migrate a single LRU folio suitable for
670 * folios that do not use PagePrivate/PagePrivate2.
672 * Folios are locked upon entry and exit.
674 int migrate_folio(struct address_space *mapping, struct folio *dst,
675 struct folio *src, enum migrate_mode mode)
677 return migrate_folio_extra(mapping, dst, src, mode, 0);
679 EXPORT_SYMBOL(migrate_folio);
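/*
 * Illustrative aops wiring (the name demo_aops is hypothetical): a
 * filesystem whose folios never carry private data can use migrate_folio()
 * directly as its ->migrate_folio callback:
 *
 *	static const struct address_space_operations demo_aops = {
 *		.migrate_folio	= migrate_folio,
 *	};
 */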
682 /* Returns true if all buffers are successfully locked */
683 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
684 enum migrate_mode mode)
686 struct buffer_head *bh = head;
687 struct buffer_head *failed_bh;
690 if (!trylock_buffer(bh)) {
691 if (mode == MIGRATE_ASYNC)
693 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
698 bh = bh->b_this_page;
699 } while (bh != head);
704 /* We failed to lock the buffer and cannot stall. */
707 while (bh != failed_bh) {
709 bh = bh->b_this_page;
715 static int __buffer_migrate_folio(struct address_space *mapping,
716 struct folio *dst, struct folio *src, enum migrate_mode mode,
719 struct buffer_head *bh, *head;
723 head = folio_buffers(src);
725 return migrate_folio(mapping, dst, src, mode);
727 /* Check whether page does not have extra refs before we do more work */
728 expected_count = folio_expected_refs(mapping, src);
729 if (folio_ref_count(src) != expected_count)
732 if (!buffer_migrate_lock_buffers(head, mode))
737 bool invalidated = false;
741 spin_lock(&mapping->private_lock);
744 if (atomic_read(&bh->b_count)) {
748 bh = bh->b_this_page;
749 } while (bh != head);
755 spin_unlock(&mapping->private_lock);
756 invalidate_bh_lrus();
758 goto recheck_buffers;
762 rc = folio_migrate_mapping(mapping, dst, src, 0);
763 if (rc != MIGRATEPAGE_SUCCESS)
766 folio_attach_private(dst, folio_detach_private(src));
770 set_bh_page(bh, &dst->page, bh_offset(bh));
771 bh = bh->b_this_page;
772 } while (bh != head);
774 if (mode != MIGRATE_SYNC_NO_COPY)
775 folio_migrate_copy(dst, src);
777 folio_migrate_flags(dst, src);
779 rc = MIGRATEPAGE_SUCCESS;
782 spin_unlock(&mapping->private_lock);
786 bh = bh->b_this_page;
787 } while (bh != head);
793 * buffer_migrate_folio() - Migration function for folios with buffers.
794 * @mapping: The address space containing @src.
795 * @dst: The folio to migrate to.
796 * @src: The folio to migrate from.
797 * @mode: How to migrate the folio.
799 * This function can only be used if the underlying filesystem guarantees
800 * that no other references to @src exist. For example attached buffer
801 * heads are accessed only under the folio lock. If your filesystem cannot
802 * provide this guarantee, buffer_migrate_folio_norefs() may be more appropriate.
805 * Return: 0 on success or a negative errno on failure.
807 int buffer_migrate_folio(struct address_space *mapping,
808 struct folio *dst, struct folio *src, enum migrate_mode mode)
810 return __buffer_migrate_folio(mapping, dst, src, mode, false);
812 EXPORT_SYMBOL(buffer_migrate_folio);
815 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
816 * @mapping: The address space containing @src.
817 * @dst: The folio to migrate to.
818 * @src: The folio to migrate from.
819 * @mode: How to migrate the folio.
821 * Like buffer_migrate_folio() except that this variant is more careful
822 * and checks that there are also no buffer head references. This function
823 * is the right one for mappings where buffer heads are directly looked
824 * up and referenced (such as block device mappings).
826 * Return: 0 on success or a negative errno on failure.
828 int buffer_migrate_folio_norefs(struct address_space *mapping,
829 struct folio *dst, struct folio *src, enum migrate_mode mode)
831 return __buffer_migrate_folio(mapping, dst, src, mode, true);
833 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
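/*
 * Hedged usage sketch: a conventional buffer_head based filesystem, whose
 * buffers are only touched under the folio lock, would typically set
 *
 *	.migrate_folio	= buffer_migrate_folio,
 *
 * in its address_space_operations, while a mapping where buffer heads are
 * looked up and referenced directly (the block device mapping being the
 * canonical case) would use the stricter variant
 *
 *	.migrate_folio	= buffer_migrate_folio_norefs,
 */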
836 int filemap_migrate_folio(struct address_space *mapping,
837 struct folio *dst, struct folio *src, enum migrate_mode mode)
841 ret = folio_migrate_mapping(mapping, dst, src, 0);
842 if (ret != MIGRATEPAGE_SUCCESS)
845 if (folio_get_private(src))
846 folio_attach_private(dst, folio_detach_private(src));
848 if (mode != MIGRATE_SYNC_NO_COPY)
849 folio_migrate_copy(dst, src);
851 folio_migrate_flags(dst, src);
852 return MIGRATEPAGE_SUCCESS;
854 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
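/*
 * Hedged note: a filesystem that keeps metadata in folio->private but has no
 * buffer heads to worry about can use this helper directly, e.g.
 *
 *	.migrate_folio	= filemap_migrate_folio,
 *
 * It moves the mapping entry, transfers the private pointer and then copies
 * the flags and (unless MIGRATE_SYNC_NO_COPY) the data.
 */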
857 * Writeback a folio to clean the dirty state
859 static int writeout(struct address_space *mapping, struct folio *folio)
861 struct writeback_control wbc = {
862 .sync_mode = WB_SYNC_NONE,
865 .range_end = LLONG_MAX,
870 if (!mapping->a_ops->writepage)
871 /* No write method for the address space */
874 if (!folio_clear_dirty_for_io(folio))
875 /* Someone else already triggered a write */
879 * A dirty folio may imply that the underlying filesystem has
880 * the folio on some queue. So the folio must be clean for
881 * migration. Writeout may mean we lose the lock and the
882 * folio state is no longer what we checked for earlier.
883 * At this point we know that the migration attempt cannot be successful.
886 remove_migration_ptes(folio, folio, false);
888 rc = mapping->a_ops->writepage(&folio->page, &wbc);
890 if (rc != AOP_WRITEPAGE_ACTIVATE)
891 /* unlocked. Relock */
894 return (rc < 0) ? -EIO : -EAGAIN;
898 * Default handling if a filesystem does not provide a migration function.
900 static int fallback_migrate_folio(struct address_space *mapping,
901 struct folio *dst, struct folio *src, enum migrate_mode mode)
903 if (folio_test_dirty(src)) {
904 /* Only writeback folios in full synchronous migration */
907 case MIGRATE_SYNC_NO_COPY:
912 return writeout(mapping, src);
916 * Buffers may be managed in a filesystem specific way.
917 * We must have no buffers or drop them.
919 if (folio_test_private(src) &&
920 !filemap_release_folio(src, GFP_KERNEL))
921 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
923 return migrate_folio(mapping, dst, src, mode);
927 * Move a page to a newly allocated page
928 * The page is locked and all ptes have been successfully removed.
930 * The new page will have replaced the old page if this function returns successfully.
935 * MIGRATEPAGE_SUCCESS - success
937 static int move_to_new_folio(struct folio *dst, struct folio *src,
938 enum migrate_mode mode)
941 bool is_lru = !__PageMovable(&src->page);
943 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
944 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
946 if (likely(is_lru)) {
947 struct address_space *mapping = folio_mapping(src);
950 rc = migrate_folio(mapping, dst, src, mode);
951 else if (mapping->a_ops->migrate_folio)
953 * Most folios have a mapping and most filesystems
954 * provide a migrate_folio callback. Anonymous folios
955 * are part of swap space which also has its own
956 * migrate_folio callback. This is the most common path
957 * for page migration.
959 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
962 rc = fallback_migrate_folio(mapping, dst, src, mode);
964 const struct movable_operations *mops;
967 * In the case of a non-LRU page, it could have been released after the
968 * isolation step. In that case, we shouldn't try migration.
970 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
971 if (!folio_test_movable(src)) {
972 rc = MIGRATEPAGE_SUCCESS;
973 folio_clear_isolated(src);
977 mops = folio_movable_ops(src);
978 rc = mops->migrate_page(&dst->page, &src->page, mode);
979 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
980 !folio_test_isolated(src));
984 * When successful, old pagecache src->mapping must be cleared before
985 * src is freed; but stats require that PageAnon be left as PageAnon.
987 if (rc == MIGRATEPAGE_SUCCESS) {
988 if (__PageMovable(&src->page)) {
989 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
992 * We clear PG_movable under page_lock so any compactor
993 * cannot try to migrate this page.
995 folio_clear_isolated(src);
999 * Anonymous and movable src->mapping will be cleared by
1000 * free_pages_prepare(), so don't reset it here; that keeps
1001 * type checks such as PageAnon() working.
1003 if (!folio_mapping_flags(src))
1004 src->mapping = NULL;
1006 if (likely(!folio_is_zone_device(dst)))
1007 flush_dcache_folio(dst);
1014 * To record some information during migration, we use some unused
1015 * fields (mapping and private) of the newly allocated
1016 * destination folio. This is safe because nobody is using them except us.
1019 union migration_ptr {
1020 struct anon_vma *anon_vma;
1021 struct address_space *mapping;
1023 static void __migrate_folio_record(struct folio *dst,
1024 unsigned long page_was_mapped,
1025 struct anon_vma *anon_vma)
1027 union migration_ptr ptr = { .anon_vma = anon_vma };
1028 dst->mapping = ptr.mapping;
1029 dst->private = (void *)page_was_mapped;
1032 static void __migrate_folio_extract(struct folio *dst,
1033 int *page_was_mappedp,
1034 struct anon_vma **anon_vmap)
1036 union migration_ptr ptr = { .mapping = dst->mapping };
1037 *anon_vmap = ptr.anon_vma;
1038 *page_was_mappedp = (unsigned long)dst->private;
1039 dst->mapping = NULL;
1040 dst->private = NULL;
1043 /* Restore the source folio to the original state upon failure */
1044 static void migrate_folio_undo_src(struct folio *src,
1045 int page_was_mapped,
1046 struct anon_vma *anon_vma,
1048 struct list_head *ret)
1050 if (page_was_mapped)
1051 remove_migration_ptes(src, src, false);
1052 /* Drop an anon_vma reference if we took one */
1054 put_anon_vma(anon_vma);
1058 list_move_tail(&src->lru, ret);
1061 /* Restore the destination folio to the original state upon failure */
1062 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1063 free_folio_t put_new_folio, unsigned long private)
1068 put_new_folio(dst, private);
1073 /* Cleanup src folio upon migration success */
1074 static void migrate_folio_done(struct folio *src,
1075 enum migrate_reason reason)
1078 * Compaction can also migrate non-LRU pages which are
1079 * not accounted to NR_ISOLATED_*. They can be recognized as __PageMovable.
1082 if (likely(!__folio_test_movable(src)))
1083 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1084 folio_is_file_lru(src), -folio_nr_pages(src));
1086 if (reason != MR_MEMORY_FAILURE)
1087 /* We release the page in page_handle_poison. */
1091 /* Obtain the lock on page, remove all ptes. */
1092 static int migrate_folio_unmap(new_folio_t get_new_folio,
1093 free_folio_t put_new_folio, unsigned long private,
1094 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1095 enum migrate_reason reason, struct list_head *ret)
1099 int page_was_mapped = 0;
1100 struct anon_vma *anon_vma = NULL;
1101 bool is_lru = !__PageMovable(&src->page);
1102 bool locked = false;
1103 bool dst_locked = false;
1105 if (folio_ref_count(src) == 1) {
1106 /* Folio was freed from under us. So we are done. */
1107 folio_clear_active(src);
1108 folio_clear_unevictable(src);
1109 /* free_pages_prepare() will clear PG_isolated. */
1110 list_del(&src->lru);
1111 migrate_folio_done(src, reason);
1112 return MIGRATEPAGE_SUCCESS;
1115 dst = get_new_folio(src, private);
1120 dst->private = NULL;
1122 if (!folio_trylock(src)) {
1123 if (mode == MIGRATE_ASYNC)
1127 * It's not safe for direct compaction to call lock_page.
1128 * For example, during page readahead pages are added locked
1129 * to the LRU. Later, when the IO completes the pages are
1130 * marked uptodate and unlocked. However, the queueing
1131 * could be merging multiple pages for one bio (e.g.
1132 * mpage_readahead). If an allocation happens for the
1133 * second or third page, the process can end up locking
1134 * the same page twice and deadlocking. Rather than
1135 * trying to be clever about what pages can be locked,
1136 * avoid the use of lock_page for direct compaction in this context.
1139 if (current->flags & PF_MEMALLOC)
1143 * In "light" mode, we can wait for transient locks (eg
1144 * inserting a page into the page table), but it's not
1145 * worth waiting for I/O.
1147 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1154 if (folio_test_writeback(src)) {
1156 * Only in the case of a full synchronous migration is it
1157 * necessary to wait for PageWriteback. In the async case,
1158 * the retry loop is too short and in the sync-light case,
1159 * the overhead of stalling is too much
1163 case MIGRATE_SYNC_NO_COPY:
1169 folio_wait_writeback(src);
1173 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1174 * we cannot notice that the anon_vma is freed while we migrate a page.
1175 * This get_anon_vma() delays freeing the anon_vma pointer until the end
1176 * of migration. File cache pages are no problem because of page_lock():
1177 * file caches may use write_page() or lock_page() during migration, so
1178 * we only need to care about anon pages here.
1180 * Only folio_get_anon_vma() understands the subtleties of
1181 * getting a hold on an anon_vma from outside one of its mms.
1182 * But if we cannot get anon_vma, then we won't need it anyway,
1183 * because that implies that the anon page is no longer mapped
1184 * (and cannot be remapped so long as we hold the page lock).
1186 if (folio_test_anon(src) && !folio_test_ksm(src))
1187 anon_vma = folio_get_anon_vma(src);
1190 * Block others from accessing the new page when we get around to
1191 * establishing additional references. We are usually the only one
1192 * holding a reference to dst at this point. We used to have a BUG
1193 * here if folio_trylock(dst) fails, but would like to allow for
1194 * cases where there might be a race with the previous use of dst.
1195 * This is much like races on refcount of oldpage: just don't BUG().
1197 if (unlikely(!folio_trylock(dst)))
1201 if (unlikely(!is_lru)) {
1202 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1203 return MIGRATEPAGE_UNMAP;
1207 * Corner case handling:
1208 * 1. When a new swap-cache page is read in, it is added to the LRU
1209 * and treated as swapcache but it has no rmap yet.
1210 * Calling try_to_unmap() against a src->mapping==NULL page will
1211 * trigger a BUG. So handle it here.
1212 * 2. An orphaned page (see truncate_cleanup_page) might have
1213 * fs-private metadata. The page can be picked up due to memory
1214 * offlining. Everywhere else except page reclaim, the page is
1215 * invisible to the vm, so the page can not be migrated. So try to
1216 * free the metadata, so the page can be freed.
1218 if (!src->mapping) {
1219 if (folio_test_private(src)) {
1220 try_to_free_buffers(src);
1223 } else if (folio_mapped(src)) {
1224 /* Establish migration ptes */
1225 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1226 !folio_test_ksm(src) && !anon_vma, src);
1227 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1228 page_was_mapped = 1;
1231 if (!folio_mapped(src)) {
1232 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1233 return MIGRATEPAGE_UNMAP;
1238 * A folio that has not been unmapped will be restored to
1239 * the right list unless we want to retry.
1244 migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
1245 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1250 /* Migrate the folio to the newly allocated folio in dst. */
1251 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1252 struct folio *src, struct folio *dst,
1253 enum migrate_mode mode, enum migrate_reason reason,
1254 struct list_head *ret)
1257 int page_was_mapped = 0;
1258 struct anon_vma *anon_vma = NULL;
1259 bool is_lru = !__PageMovable(&src->page);
1260 struct list_head *prev;
1262 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1263 prev = dst->lru.prev;
1264 list_del(&dst->lru);
1266 rc = move_to_new_folio(dst, src, mode);
1270 if (unlikely(!is_lru))
1271 goto out_unlock_both;
1274 * When successful, push dst to LRU immediately: so that if it
1275 * turns out to be an mlocked page, remove_migration_ptes() will
1276 * automatically build up the correct dst->mlock_count for it.
1278 * We would like to do something similar for the old page, when
1279 * unsuccessful, and other cases when a page has been temporarily
1280 * isolated from the unevictable LRU: but this case is the easiest.
1283 if (page_was_mapped)
1286 if (page_was_mapped)
1287 remove_migration_ptes(src, dst, false);
1291 set_page_owner_migrate_reason(&dst->page, reason);
1293 * If migration is successful, decrease refcount of dst,
1294 * which will not free the page because the new page owner increased the refcount.
1300 * A folio that has been migrated has all references removed
1301 * and will be freed.
1303 list_del(&src->lru);
1304 /* Drop an anon_vma reference if we took one */
1306 put_anon_vma(anon_vma);
1308 migrate_folio_done(src, reason);
1313 * A folio that has not been migrated will be restored to
1314 * the right list unless we want to retry.
1316 if (rc == -EAGAIN) {
1317 list_add(&dst->lru, prev);
1318 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1322 migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
1323 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1329 * Counterpart of unmap_and_move_page() for hugepage migration.
1331 * This function doesn't wait for the completion of hugepage I/O
1332 * because there is no race between I/O and migration for hugepage.
1333 * Note that currently hugepage I/O occurs only in direct I/O
1334 * where no lock is held and PG_writeback is irrelevant,
1335 * and writeback status of all subpages are counted in the reference
1336 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1337 * under direct I/O, the reference of the head page is 512 and a bit more.)
1338 * This means that when we try to migrate a hugepage whose subpages are
1339 * doing direct I/O, some references remain after try_to_unmap() and
1340 * hugepage migration fails without data corruption.
1342 * There is also no race when direct I/O is issued on the page under migration,
1343 * because then pte is replaced with migration swap entry and direct I/O code
1344 * will wait in the page fault for migration to complete.
1346 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1347 free_folio_t put_new_folio, unsigned long private,
1348 struct folio *src, int force, enum migrate_mode mode,
1349 int reason, struct list_head *ret)
1353 int page_was_mapped = 0;
1354 struct anon_vma *anon_vma = NULL;
1355 struct address_space *mapping = NULL;
1357 if (folio_ref_count(src) == 1) {
1358 /* page was freed from under us. So we are done. */
1359 folio_putback_active_hugetlb(src);
1360 return MIGRATEPAGE_SUCCESS;
1363 dst = get_new_folio(src, private);
1367 if (!folio_trylock(src)) {
1372 case MIGRATE_SYNC_NO_COPY:
1381 * Check for pages which are in the process of being freed. Without
1382 * folio_mapping() set, hugetlbfs specific move page routine will not
1383 * be called and we could leak usage counts for subpools.
1385 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1390 if (folio_test_anon(src))
1391 anon_vma = folio_get_anon_vma(src);
1393 if (unlikely(!folio_trylock(dst)))
1396 if (folio_mapped(src)) {
1397 enum ttu_flags ttu = 0;
1399 if (!folio_test_anon(src)) {
1401 * In shared mappings, try_to_unmap could potentially
1402 * call huge_pmd_unshare. Because of this, take
1403 * semaphore in write mode here and set TTU_RMAP_LOCKED
1404 * to let lower levels know we have taken the lock.
1406 mapping = hugetlb_page_mapping_lock_write(&src->page);
1407 if (unlikely(!mapping))
1408 goto unlock_put_anon;
1410 ttu = TTU_RMAP_LOCKED;
1413 try_to_migrate(src, ttu);
1414 page_was_mapped = 1;
1416 if (ttu & TTU_RMAP_LOCKED)
1417 i_mmap_unlock_write(mapping);
1420 if (!folio_mapped(src))
1421 rc = move_to_new_folio(dst, src, mode);
1423 if (page_was_mapped)
1424 remove_migration_ptes(src,
1425 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1432 put_anon_vma(anon_vma);
1434 if (rc == MIGRATEPAGE_SUCCESS) {
1435 move_hugetlb_state(src, dst, reason);
1436 put_new_folio = NULL;
1442 if (rc == MIGRATEPAGE_SUCCESS)
1443 folio_putback_active_hugetlb(src);
1444 else if (rc != -EAGAIN)
1445 list_move_tail(&src->lru, ret);
1448 * If migration was not successful and there's a freeing callback, use
1449 * it. Otherwise, put_page() will drop the reference grabbed during isolation.
1453 put_new_folio(dst, private);
1455 folio_putback_active_hugetlb(dst);
1460 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1465 rc = split_folio_to_list(folio, split_folios);
1466 folio_unlock(folio);
1468 list_move_tail(&folio->lru, split_folios);
1473 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1474 #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1476 #define NR_MAX_BATCHED_MIGRATION 512
1478 #define NR_MAX_MIGRATE_PAGES_RETRY 10
1479 #define NR_MAX_MIGRATE_ASYNC_RETRY 3
1480 #define NR_MAX_MIGRATE_SYNC_RETRY \
1481 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1483 struct migrate_pages_stats {
1484 int nr_succeeded; /* Normal and large folios migrated successfully, in
1485 units of base pages */
1486 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1487 units of base pages. Untried folios aren't counted */
1488 int nr_thp_succeeded; /* THP migrated successfully */
1489 int nr_thp_failed; /* THP failed to be migrated */
1490 int nr_thp_split; /* THP split before migrating */
1494 * Returns the number of hugetlb folios that were not migrated, or an error code
1495 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1496 * any more because the list has become empty or no retryable hugetlb folios
1497 * exist any more. It is the caller's responsibility to call putback_movable_pages() only if ret != 0.
1500 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1501 free_folio_t put_new_folio, unsigned long private,
1502 enum migrate_mode mode, int reason,
1503 struct migrate_pages_stats *stats,
1504 struct list_head *ret_folios)
1508 int nr_retry_pages = 0;
1510 struct folio *folio, *folio2;
1513 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1517 list_for_each_entry_safe(folio, folio2, from, lru) {
1518 if (!folio_test_hugetlb(folio))
1521 nr_pages = folio_nr_pages(folio);
1526 * Migratability of hugepages depends on architectures and
1527 * their size. This check is necessary because some callers
1528 * of hugepage migration like soft offline and memory
1529 * hotremove don't walk through page tables or check whether
1530 * the hugepage is pmd-based or not before kicking migration.
1532 if (!hugepage_migration_supported(folio_hstate(folio))) {
1534 stats->nr_failed_pages += nr_pages;
1535 list_move_tail(&folio->lru, ret_folios);
1539 rc = unmap_and_move_huge_page(get_new_folio,
1540 put_new_folio, private,
1541 folio, pass > 2, mode,
1542 reason, ret_folios);
1545 * Success: hugetlb folio will be put back
1546 * -EAGAIN: stay on the from list
1547 * -ENOMEM: stay on the from list
1548 * Other errno: put on ret_folios list
1553 * When memory is low, don't bother to try to migrate
1554 * other folios, just exit.
1556 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1560 nr_retry_pages += nr_pages;
1562 case MIGRATEPAGE_SUCCESS:
1563 stats->nr_succeeded += nr_pages;
1567 * Permanent failure (-EBUSY, etc.):
1568 * unlike -EAGAIN case, the failed folio is
1569 * removed from migration folio list and not
1570 * retried in the next outer loop.
1573 stats->nr_failed_pages += nr_pages;
1579 * nr_failed is the number of hugetlb folios that failed to be migrated. After
1580 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb folios as failed.
1584 stats->nr_failed_pages += nr_retry_pages;
1590 * migrate_pages_batch() first unmaps folios in the from list as many as
1591 * possible, then move the unmapped folios.
1593 * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a
1594 * lock or bit when we have locked more than one folio. Which may cause
1595 * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
1596 * length of the from list must be <= 1.
1598 static int migrate_pages_batch(struct list_head *from,
1599 new_folio_t get_new_folio, free_folio_t put_new_folio,
1600 unsigned long private, enum migrate_mode mode, int reason,
1601 struct list_head *ret_folios, struct list_head *split_folios,
1602 struct migrate_pages_stats *stats, int nr_pass)
1607 int nr_retry_pages = 0;
1609 bool is_thp = false;
1610 struct folio *folio, *folio2, *dst = NULL, *dst2;
1611 int rc, rc_saved = 0, nr_pages;
1612 LIST_HEAD(unmap_folios);
1613 LIST_HEAD(dst_folios);
1614 bool nosplit = (reason == MR_NUMA_MISPLACED);
1616 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1617 !list_empty(from) && !list_is_singular(from));
1619 for (pass = 0; pass < nr_pass && retry; pass++) {
1624 list_for_each_entry_safe(folio, folio2, from, lru) {
1625 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1626 nr_pages = folio_nr_pages(folio);
1631 * Large folio migration might be unsupported or
1632 * the allocation might fail, so we should retry
1633 * on the same folio with the large folio split to normal folios.
1636 * Split folios are put in split_folios, and
1637 * we will migrate them after the rest of the
1638 * list is processed.
1640 if (!thp_migration_supported() && is_thp) {
1642 stats->nr_thp_failed++;
1643 if (!try_split_folio(folio, split_folios)) {
1644 stats->nr_thp_split++;
1647 stats->nr_failed_pages += nr_pages;
1648 list_move_tail(&folio->lru, ret_folios);
1652 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1653 private, folio, &dst, mode, reason,
1657 * Success: folio will be freed
1658 * Unmap: folio will be put on unmap_folios list,
1659 * dst folio put on dst_folios list
1660 * -EAGAIN: stay on the from list
1661 * -ENOMEM: stay on the from list
1662 * Other errno: put on ret_folios list
1667 * When memory is low, don't bother to try to migrate
1668 * other folios, move unmapped folios, then exit.
1671 stats->nr_thp_failed += is_thp;
1672 /* Large folio NUMA faulting doesn't split to retry. */
1673 if (folio_test_large(folio) && !nosplit) {
1674 int ret = try_split_folio(folio, split_folios);
1677 stats->nr_thp_split += is_thp;
1679 } else if (reason == MR_LONGTERM_PIN &&
1682 * Try again to split large folio to
1683 * mitigate the failure of longterm pinning.
1686 thp_retry += is_thp;
1687 nr_retry_pages += nr_pages;
1688 /* Undo duplicated failure counting. */
1690 stats->nr_thp_failed -= is_thp;
1695 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1696 /* nr_failed isn't updated for folios that were never attempted */
1697 stats->nr_thp_failed += thp_retry;
1699 if (list_empty(&unmap_folios))
1705 thp_retry += is_thp;
1706 nr_retry_pages += nr_pages;
1708 case MIGRATEPAGE_SUCCESS:
1709 stats->nr_succeeded += nr_pages;
1710 stats->nr_thp_succeeded += is_thp;
1712 case MIGRATEPAGE_UNMAP:
1713 list_move_tail(&folio->lru, &unmap_folios);
1714 list_add_tail(&dst->lru, &dst_folios);
1718 * Permanent failure (-EBUSY, etc.):
1719 * unlike -EAGAIN case, the failed folio is
1720 * removed from migration folio list and not
1721 * retried in the next outer loop.
1724 stats->nr_thp_failed += is_thp;
1725 stats->nr_failed_pages += nr_pages;
1731 stats->nr_thp_failed += thp_retry;
1732 stats->nr_failed_pages += nr_retry_pages;
1734 /* Flush TLBs for all unmapped folios */
1735 try_to_unmap_flush();
1738 for (pass = 0; pass < nr_pass && retry; pass++) {
1743 dst = list_first_entry(&dst_folios, struct folio, lru);
1744 dst2 = list_next_entry(dst, lru);
1745 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1746 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1747 nr_pages = folio_nr_pages(folio);
1751 rc = migrate_folio_move(put_new_folio, private,
1753 reason, ret_folios);
1756 * Success: folio will be freed
1757 * -EAGAIN: stay on the unmap_folios list
1758 * Other errno: put on ret_folios list
1763 thp_retry += is_thp;
1764 nr_retry_pages += nr_pages;
1766 case MIGRATEPAGE_SUCCESS:
1767 stats->nr_succeeded += nr_pages;
1768 stats->nr_thp_succeeded += is_thp;
1772 stats->nr_thp_failed += is_thp;
1773 stats->nr_failed_pages += nr_pages;
1777 dst2 = list_next_entry(dst, lru);
1781 stats->nr_thp_failed += thp_retry;
1782 stats->nr_failed_pages += nr_retry_pages;
1784 rc = rc_saved ? : nr_failed;
1786 /* Cleanup remaining folios */
1787 dst = list_first_entry(&dst_folios, struct folio, lru);
1788 dst2 = list_next_entry(dst, lru);
1789 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1790 int page_was_mapped = 0;
1791 struct anon_vma *anon_vma = NULL;
1793 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1794 migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
1796 list_del(&dst->lru);
1797 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1799 dst2 = list_next_entry(dst, lru);
1805 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1806 free_folio_t put_new_folio, unsigned long private,
1807 enum migrate_mode mode, int reason,
1808 struct list_head *ret_folios, struct list_head *split_folios,
1809 struct migrate_pages_stats *stats)
1811 int rc, nr_failed = 0;
1813 struct migrate_pages_stats astats;
1815 memset(&astats, 0, sizeof(astats));
1816 /* Try to migrate in batch with MIGRATE_ASYNC mode first */
1817 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1818 reason, &folios, split_folios, &astats,
1819 NR_MAX_MIGRATE_ASYNC_RETRY);
1820 stats->nr_succeeded += astats.nr_succeeded;
1821 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1822 stats->nr_thp_split += astats.nr_thp_split;
1824 stats->nr_failed_pages += astats.nr_failed_pages;
1825 stats->nr_thp_failed += astats.nr_thp_failed;
1826 list_splice_tail(&folios, ret_folios);
1829 stats->nr_thp_failed += astats.nr_thp_split;
1830 nr_failed += astats.nr_thp_split;
1832 * Fall back to migrate all failed folios one by one synchronously. All
1833 * failed folios except split THPs will be retried, so their failure isn't counted here.
1836 list_splice_tail_init(&folios, from);
1837 while (!list_empty(from)) {
1838 list_move(from->next, &folios);
1839 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1840 private, mode, reason, ret_folios,
1841 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1842 list_splice_tail_init(&folios, ret_folios);
1852 * migrate_pages - migrate the folios specified in a list, to the free folios
1853 * supplied as the target for the page migration
1855 * @from: The list of folios to be migrated.
1856 * @get_new_folio: The function used to allocate free folios to be used
1857 * as the target of the folio migration.
1858 * @put_new_folio: The function used to free target folios if migration
1859 * fails, or NULL if no special handling is necessary.
1860 * @private: Private data to be passed on to get_new_folio()
1861 * @mode: The migration mode that specifies the constraints for
1862 * folio migration, if any.
1863 * @reason: The reason for folio migration.
1864 * @ret_succeeded: Set to the number of folios migrated successfully if
1865 * the caller passes a non-NULL pointer.
1867 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1868 * are movable any more because the list has become empty or no retryable folios
1869 * exist any more. It is the caller's responsibility to call putback_movable_pages() only if ret != 0.
1872 * Returns the number of {normal folio, large folio, hugetlb} pages that were not
1873 * migrated, or an error code. The number of large folio splits will be
1874 * considered as the number of non-migrated large folios, no matter how many
1875 * split folios of the large folio are migrated successfully.
1877 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1878 free_folio_t put_new_folio, unsigned long private,
1879 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1883 struct folio *folio, *folio2;
1885 LIST_HEAD(ret_folios);
1886 LIST_HEAD(split_folios);
1887 struct migrate_pages_stats stats;
1889 trace_mm_migrate_pages_start(mode, reason);
1891 memset(&stats, 0, sizeof(stats));
1893 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1894 mode, reason, &stats, &ret_folios);
1900 list_for_each_entry_safe(folio, folio2, from, lru) {
1901 /* Retried hugetlb folios will be kept in list */
1902 if (folio_test_hugetlb(folio)) {
1903 list_move_tail(&folio->lru, &ret_folios);
1907 nr_pages += folio_nr_pages(folio);
1908 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1911 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1912 list_cut_before(&folios, from, &folio2->lru);
1914 list_splice_init(from, &folios);
1915 if (mode == MIGRATE_ASYNC)
1916 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1917 private, mode, reason, &ret_folios,
1918 &split_folios, &stats,
1919 NR_MAX_MIGRATE_PAGES_RETRY);
1921 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1922 private, mode, reason, &ret_folios,
1923 &split_folios, &stats);
1924 list_splice_tail_init(&folios, &ret_folios);
1927 list_splice_tail(&split_folios, &ret_folios);
1930 if (!list_empty(&split_folios)) {
1932 * Failure isn't counted since all split folios of a large folio
1933 * are counted as 1 failure already. And, we only try to migrate
1934 * with minimal effort, force MIGRATE_ASYNC mode and retry once.
1936 migrate_pages_batch(&split_folios, get_new_folio,
1937 put_new_folio, private, MIGRATE_ASYNC, reason,
1938 &ret_folios, NULL, &stats, 1);
1939 list_splice_tail_init(&split_folios, &ret_folios);
1942 if (!list_empty(from))
1946 * Put the permanent failure folios back on the migration list; they
1947 * will be put back to the right list by the caller.
1949 list_splice(&ret_folios, from);
1952 * Return 0 in case all split folios of fail-to-migrate large folios
1953 * are migrated successfully.
1955 if (list_empty(from))
1958 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1959 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1960 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1961 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1962 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1963 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1964 stats.nr_thp_succeeded, stats.nr_thp_failed,
1965 stats.nr_thp_split, mode, reason);
1968 *ret_succeeded = stats.nr_succeeded;
1973 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
1975 struct migration_target_control *mtc;
1977 unsigned int order = 0;
1981 mtc = (struct migration_target_control *)private;
1982 gfp_mask = mtc->gfp_mask;
1984 if (nid == NUMA_NO_NODE)
1985 nid = folio_nid(src);
1987 if (folio_test_hugetlb(src)) {
1988 struct hstate *h = folio_hstate(src);
1990 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1991 return alloc_hugetlb_folio_nodemask(h, nid,
1992 mtc->nmask, gfp_mask);
1995 if (folio_test_large(src)) {
1997 * clear __GFP_RECLAIM to make the migration callback
1998 * consistent with regular THP allocations.
2000 gfp_mask &= ~__GFP_RECLAIM;
2001 gfp_mask |= GFP_TRANSHUGE;
2002 order = folio_order(src);
2004 zidx = zone_idx(folio_zone(src));
2005 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2006 gfp_mask |= __GFP_HIGHMEM;
2008 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
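/*
 * Hedged caller sketch, modeled on the memory-hotplug offline path: isolate
 * the folios to be moved onto a private list and hand them to
 * migrate_pages() together with alloc_migration_target() and a
 * migration_target_control describing the destination (target_nid is a
 * hypothetical variable):
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 *	};
 *	int ret;
 *
 *	// ... isolate_lru_page()/isolate_hugetlb() folios onto &pagelist ...
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC,
 *			    MR_MEMORY_HOTPLUG, NULL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */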
2013 static int store_status(int __user *status, int start, int value, int nr)
2016 if (put_user(value, status + start))
2024 static int do_move_pages_to_node(struct mm_struct *mm,
2025 struct list_head *pagelist, int node)
2028 struct migration_target_control mtc = {
2030 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2033 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2034 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2036 putback_movable_pages(pagelist);
2041 * Resolves the given address to a struct page, isolates it from the LRU and
2042 * puts it to the given pagelist.
2044 * errno - if the page cannot be found/isolated
2045 * 0 - when it doesn't have to be migrated because it is already on the target node
2047 * 1 - when it has been queued
2049 static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2050 int node, struct list_head *pagelist, bool migrate_all)
2052 struct vm_area_struct *vma;
2059 addr = (unsigned long)untagged_addr_remote(mm, p);
2062 vma = vma_lookup(mm, addr);
2063 if (!vma || !vma_migratable(vma))
2066 /* FOLL_DUMP to ignore special (like zero) pages */
2067 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2069 err = PTR_ERR(page);
2077 if (is_zone_device_page(page))
2081 if (page_to_nid(page) == node)
2085 if (page_mapcount(page) > 1 && !migrate_all)
2088 if (PageHuge(page)) {
2089 if (PageHead(page)) {
2090 isolated = isolate_hugetlb(page_folio(page), pagelist);
2091 err = isolated ? 1 : -EBUSY;
2096 head = compound_head(page);
2097 isolated = isolate_lru_page(head);
2104 list_add_tail(&head->lru, pagelist);
2105 mod_node_page_state(page_pgdat(head),
2106 NR_ISOLATED_ANON + page_is_file_lru(head),
2107 thp_nr_pages(head));
2111 * Either remove the duplicate refcount from
2112 * isolate_lru_page() or drop the page ref if it was not isolated.
2117 mmap_read_unlock(mm);
2121 static int move_pages_and_store_status(struct mm_struct *mm, int node,
2122 struct list_head *pagelist, int __user *status,
2123 int start, int i, unsigned long nr_pages)
2127 if (list_empty(pagelist))
2130 err = do_move_pages_to_node(mm, pagelist, node);
2133 * Positive err means the number of failed
2134 * pages to migrate. Since we are going to
2135 * abort and return the number of non-migrated
2136 * pages, we need to include the rest of the
2137 * nr_pages that have not been attempted as well.
2141 err += nr_pages - i;
2144 return store_status(status, start, node, i - start);
2148 * Migrate an array of page addresses onto an array of nodes and fill
2149 * the corresponding array of status.
2151 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2152 unsigned long nr_pages,
2153 const void __user * __user *pages,
2154 const int __user *nodes,
2155 int __user *status, int flags)
2157 int current_node = NUMA_NO_NODE;
2158 LIST_HEAD(pagelist);
2162 lru_cache_disable();
2164 for (i = start = 0; i < nr_pages; i++) {
2165 const void __user *p;
2169 if (get_user(p, pages + i))
2171 if (get_user(node, nodes + i))
2175 if (node < 0 || node >= MAX_NUMNODES)
2177 if (!node_state(node, N_MEMORY))
2181 if (!node_isset(node, task_nodes))
2184 if (current_node == NUMA_NO_NODE) {
2185 current_node = node;
2187 } else if (node != current_node) {
2188 err = move_pages_and_store_status(mm, current_node,
2189 &pagelist, status, start, i, nr_pages);
2193 current_node = node;
2197 * Errors in the page lookup or isolation are not fatal and we simply
2198 * report them via status
2200 err = add_page_for_migration(mm, p, current_node, &pagelist,
2201 flags & MPOL_MF_MOVE_ALL);
2204 /* The page is successfully queued for migration */
2209 * The move_pages() man page does not have an -EEXIST choice, so
2210 * use -EFAULT instead.
2216 * If the page is already on the target node (!err), store the
2217 * node, otherwise, store the err.
2219 err = store_status(status, i, err ? : current_node, 1);
2223 err = move_pages_and_store_status(mm, current_node, &pagelist,
2224 status, start, i, nr_pages);
2226 /* We have accounted for page i */
2231 current_node = NUMA_NO_NODE;
2234 /* Make sure we do not overwrite the existing error */
2235 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2236 status, start, i, nr_pages);
2245 * Determine the nodes of an array of pages and store it in an array of status.
2247 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2248 const void __user **pages, int *status)
2254 for (i = 0; i < nr_pages; i++) {
2255 unsigned long addr = (unsigned long)(*pages);
2256 struct vm_area_struct *vma;
2260 vma = vma_lookup(mm, addr);
2264 /* FOLL_DUMP to ignore special (like zero) pages */
2265 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2267 err = PTR_ERR(page);
2275 if (!is_zone_device_page(page))
2276 err = page_to_nid(page);
2286 mmap_read_unlock(mm);
2289 static int get_compat_pages_array(const void __user *chunk_pages[],
2290 const void __user * __user *pages,
2291 unsigned long chunk_nr)
2293 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2297 for (i = 0; i < chunk_nr; i++) {
2298 if (get_user(p, pages32 + i))
2300 chunk_pages[i] = compat_ptr(p);
2307 * Determine the nodes of a user array of pages and store it in
2308 * a user array of status.
2310 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2311 const void __user * __user *pages,
2314 #define DO_PAGES_STAT_CHUNK_NR 16UL
2315 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2316 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2319 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2321 if (in_compat_syscall()) {
2322 if (get_compat_pages_array(chunk_pages, pages,
2326 if (copy_from_user(chunk_pages, pages,
2327 chunk_nr * sizeof(*chunk_pages)))
2331 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2333 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2338 nr_pages -= chunk_nr;
2340 return nr_pages ? -EFAULT : 0;
2343 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2345 struct task_struct *task;
2346 struct mm_struct *mm;
2349 * There is no need to check if current process has the right to modify
2350 * the specified process when they are the same.
2354 *mem_nodes = cpuset_mems_allowed(current);
2358 /* Find the mm_struct */
2360 task = find_task_by_vpid(pid);
2363 return ERR_PTR(-ESRCH);
2365 get_task_struct(task);
2368 * Check if this process has the right to modify the specified
2369 * process. Use the regular "ptrace_may_access()" checks.
2371 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2373 mm = ERR_PTR(-EPERM);
2378 mm = ERR_PTR(security_task_movememory(task));
2381 *mem_nodes = cpuset_mems_allowed(task);
2382 mm = get_task_mm(task);
2384 put_task_struct(task);
2386 mm = ERR_PTR(-EINVAL);
2391 * Move a list of pages in the address space of the currently executing process.
2394 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2395 const void __user * __user *pages,
2396 const int __user *nodes,
2397 int __user *status, int flags)
2399 struct mm_struct *mm;
2401 nodemask_t task_nodes;
2404 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2407 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2410 mm = find_mm_struct(pid, &task_nodes);
2415 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2416 nodes, status, flags);
2418 err = do_pages_stat(mm, nr_pages, pages, status);
2424 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2425 const void __user * __user *, pages,
2426 const int __user *, nodes,
2427 int __user *, status, int, flags)
2429 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
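/*
 * Userspace view, for reference (via libnuma's wrapper declared in
 * <numaif.h>): move one page of the calling process to node 1 and read back
 * the per-page status. Values are illustrative.
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	// status[0] is the node the page now lives on, or a negative errno
 *
 * Passing nodes == NULL queries the current node of each page without
 * migrating anything (the do_pages_stat() path above).
 */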
2432 #ifdef CONFIG_NUMA_BALANCING
2434 * Returns true if this is a safe migration target node for misplaced NUMA
2435 * pages. Currently it only checks the watermarks which is crude.
2437 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2438 unsigned long nr_migrate_pages)
2442 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2443 struct zone *zone = pgdat->node_zones + z;
2445 if (!managed_zone(zone))
2448 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2449 if (!zone_watermark_ok(zone, 0,
2450 high_wmark_pages(zone) +
2459 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2462 int nid = (int) data;
2463 int order = folio_order(src);
2464 gfp_t gfp = __GFP_THISNODE;
2467 gfp |= GFP_TRANSHUGE_LIGHT;
2469 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2471 gfp &= ~__GFP_RECLAIM;
2473 return __folio_alloc_node(gfp, order, nid);
2476 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2478 int nr_pages = thp_nr_pages(page);
2479 int order = compound_order(page);
2481 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2483 /* Do not migrate THP mapped by multiple processes */
2484 if (PageTransHuge(page) && total_mapcount(page) > 1)
2487 /* Avoid migrating to a node that is nearly full */
2488 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2491 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2493 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2494 if (managed_zone(pgdat->node_zones + z))
2497 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2501 if (!isolate_lru_page(page))
2504 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2508 * Isolating the page has taken another reference, so the
2509 * caller's reference can be safely dropped without the page
2510 * disappearing underneath us during migration.
2517 * Attempt to migrate a misplaced page to the specified destination
2518 * node. Caller is expected to have an elevated reference count on
2519 * the page that will be dropped by this function before returning.
2521 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2524 pg_data_t *pgdat = NODE_DATA(node);
2527 unsigned int nr_succeeded;
2528 LIST_HEAD(migratepages);
2529 int nr_pages = thp_nr_pages(page);
2532 * Don't migrate file pages that are mapped in multiple processes
2533 * with execute permissions as they are probably shared libraries.
2535 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2536 (vma->vm_flags & VM_EXEC))
2540 * Also do not migrate dirty pages as not all filesystems can move
2541 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2543 if (page_is_file_lru(page) && PageDirty(page))
2546 isolated = numamigrate_isolate_page(pgdat, page);
2550 list_add(&page->lru, &migratepages);
2551 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2552 NULL, node, MIGRATE_ASYNC,
2553 MR_NUMA_MISPLACED, &nr_succeeded);
2555 if (!list_empty(&migratepages)) {
2556 list_del(&page->lru);
2557 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2558 page_is_file_lru(page), -nr_pages);
2559 putback_lru_page(page);
2564 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2565 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2566 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2569 BUG_ON(!list_empty(&migratepages));
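/*
 * Hedged caller sketch: the NUMA hinting fault path (do_numa_page()) picks a
 * target node and, while still holding its page reference, does roughly:
 *
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 */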
2576 #endif /* CONFIG_NUMA_BALANCING */
2577 #endif /* CONFIG_NUMA */