/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
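
/*
 * Example (editor's sketch, not from the original source): a caller
 * typically pairs migrate_prep() with isolate_lru_page() roughly as
 * below; "page" stands for a hypothetical candidate page and the list
 * is later handed to migrate_pages():
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(page));
 *	}
 */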

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used instead of putback_lru_pages(),
 * whenever the isolated pageset has been built by isolate_migratepages_range()
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		if (unlikely(isolated_balloon_page(page)))
			balloon_page_putback(page);
		else
			putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			goto out;
		if (pmd_trans_huge(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock?  No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
	}
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}
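
/*
 * Example (editor's sketch, not from the original source): the
 * counterpart of the restore above lives in try_to_unmap_one(), which
 * replaces a present pte with a migration entry roughly like this:
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 */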

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even if it fails, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = &(mm)->page_table_lock;
	__migration_entry_wait(mm, pte, ptl);
}
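
/*
 * Example (editor's sketch, not from the original source): the page
 * fault path is the main caller here. do_swap_page() detects the
 * migration entry and waits, roughly:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *		...
 *	}
 */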

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */
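
/*
 * Editor's aside (not part of the original file): the migrate_mode
 * values branched on above are declared in <linux/migrate_mode.h>:
 *
 *	MIGRATE_ASYNC		never blocks
 *	MIGRATE_SYNC_LIGHT	allows blocking on most operations, but
 *				not on ->writepage, as the potential
 *				stall time is too significant
 *	MIGRATE_SYNC		may block freely while migrating pages
 */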

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode)
{
	int expected_count = 0;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * was moved, we later failed to lock the buffers and could not move
	 * the mapping back due to an elevated page count, we would have to
	 * block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_unfreeze_refs(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_unfreeze_refs(page, expected_count - 1);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (!PageSwapCache(page) && PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}
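
/*
 * Editor's aside (not part of the original file): a worked example of
 * the reference arithmetic above, for a page cache page with buffers
 * attached (expected_count = 2 + page_has_private(page) = 3):
 *
 *	1 ref held by the radix tree slot being replaced
 *	1 ref held by the migration caller's isolation
 *	1 ref held via PagePrivate for the attached buffers
 *
 * page_freeze_refs(page, 3) succeeds only if the count is exactly 3,
 * which guarantees that no other user can take a new reference while
 * the radix tree slot is switched over to newpage.
 */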

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return MIGRATEPAGE_SUCCESS;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);
	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		if (PageSwapBacked(page))
			SetPageDirty(newpage);
		else
			__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
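
/*
 * Editor's aside (not part of the original file): mappings opt in
 * through their address_space_operations; swap space, for instance,
 * wires this helper up directly in mm/swap_state.c:
 *
 *	static const struct address_space_operations swap_aops = {
 *		.writepage	= swap_writepage,
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */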

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now.
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
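
/*
 * Editor's aside (not part of the original file): block-based
 * filesystems are the typical users of this path, e.g. ext2 in
 * fs/ext2/inode.c:
 *
 *	const struct address_space_operations ext2_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page,
 *	};
 */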

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				int remap_swapcache, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc != MIGRATEPAGE_SUCCESS) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}

	unlock_page(newpage);

	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int remap_swapcache = 1;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	/* charge against new page */
	mem_cgroup_prepare_migration(page, newpage, &mem);

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * After try_to_unmap(), page->mapcount drops to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(); file caches may use writepage() or lock_page() in
	 * migration, so only anon pages need care here.
	 */
	if (PageAnon(page) && !PageKsm(page)) {
		/*
		 * Only page_lock_anon_vma_read() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_get_anon_vma(page);
		if (anon_vma) {
			/*
			 * Anon page
			 */
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes.
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	if (unlikely(balloon_page_movable(page))) {
		/*
		 * A ballooned page does not need any special attention from
		 * physical to virtual reverse mapping procedures.
		 * Skip any attempt to unmap PTEs or to remap swap cache,
		 * in order to avoid burning cycles at rmap level, and perform
		 * the page migration right away (protected by page lock).
		 */
		rc = balloon_page_migrate(newpage, page, mode);
		goto uncharge;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, mode);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	mem_cgroup_end_migration(mem, page, newpage,
				 (rc == MIGRATEPAGE_SUCCESS ||
				  rc == MIGRATEPAGE_BALLOON_SUCCESS));
	unlock_page(page);
out:
	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto out;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto out;

	rc = __unmap_and_move(page, newpage, force, mode);

	if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
		/*
		 * A ballooned page has been migrated already.
		 * Now it's time to wrap up counters,
		 * hand the page back to Buddy and return.
		 */
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
		balloon_page_free(page);
		return MIGRATEPAGE_SUCCESS;
	}
out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, enum migrate_mode mode)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	/*
	 * Movability of hugepages depends on architectures and hugepage size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_support(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1, mode);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		put_anon_vma(anon_vma);

	if (!rc)
		hugetlb_cgroup_migrate(hpage, new_hpage);

	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);
	put_page(new_hpage);
	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free
 *		   pages supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist any more.
 * The caller should call putback_lru_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		unsigned long private, enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						private, page, pass > 2, mode);
			else
				rc = unmap_and_move(get_new_page, private,
						page, pass > 2, mode);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = nr_failed + retry;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}
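
/*
 * Example (editor's sketch, not from the original source): a minimal
 * caller. "alloc_target" is a hypothetical new_page_t allocator; real
 * callers include compaction (compaction_alloc) and sys_move_pages
 * (new_page_node below):
 *
 *	static struct page *alloc_target(struct page *page,
 *					 unsigned long private, int **result)
 *	{
 *		return alloc_page(GFP_HIGHUSER_MOVABLE);
 *	}
 *
 *	err = migrate_pages(&pagelist, alloc_target, 0,
 *			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */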

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
					pm->node);
	else
		return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		if (PageHuge(page)) {
			isolate_huge_page(page, &pagelist);
			goto put_and_set;
		}

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}
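
/*
 * Example (editor's sketch, not from the original source): from
 * userspace this syscall is normally reached via the move_pages(2)
 * wrapper declared in <numaif.h>. Passing nodes == NULL turns the
 * call into a pure query of current placement:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1]   = { 1 };
 *	int status[1];
 *
 *	// move one page of the calling process to node 1
 *	move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	// query which node the page now lives on
 *	move_pages(0, 1, pages, NULL, status, 0);
 */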

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;
	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		if (!zone_reclaimable(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       0, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data,
					   int **result)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = alloc_pages_exact_node(nid,
					 (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
					  __GFP_NOMEMALLOC | __GFP_NORETRY |
					  __GFP_NOWARN) &
					 ~GFP_IOFS, 0);
	if (newpage)
		page_nid_xchg_last(newpage, page_nid_last(page));

	return newpage;
}

/*
 * page migration rate limiting control.
 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
 * window of time. Default here says do not migrate more than 1280M per second.
 * If a node is rate-limited then PTE NUMA updates are also rate-limited. However
 * as it is faults that reset the window, pte updates will happen unconditionally
 * if there has not been a fault since @pteupdate_interval_millisecs after the
 * throttle window closed.
 */
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);

/* Returns true if NUMA migration is currently rate limited */
bool migrate_ratelimited(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);

	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
				msecs_to_jiffies(pteupdate_interval_millisecs)))
		return false;

	if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
		return false;

	return true;
}

/* Returns true if the node is migrate rate-limited after the update */
bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
{
	bool rate_limited = false;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	spin_lock(&pgdat->numabalancing_migrate_lock);
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies +
			msecs_to_jiffies(migrate_interval_millisecs);
	}
	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
		rate_limited = true;
	else
		pgdat->numabalancing_migrate_nr_pages += nr_pages;
	spin_unlock(&pgdat->numabalancing_migrate_lock);

	return rate_limited;
}
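
/*
 * Editor's aside (not part of the original file): the default limit
 * works out as follows. ratelimit_pages is 128 << (20 - PAGE_SHIFT)
 * pages, i.e. 128MB expressed in pages regardless of page size,
 * allowed per 100ms window (migrate_interval_millisecs); that is the
 * "1280M per second" mentioned in the comment above:
 *
 *	128MB / 0.1s = 1280MB/s
 */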

int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON(compound_order(page) && !PageTransHuge(page));

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: 1 for page's mapcount and 1 for the
	 * caller's pin and 1 for the reference taken by isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);

	/*
	 * Don't migrate pages that are mapped in multiple processes.
	 * TODO: Handle false sharing detection instead of this hammer
	 */
	if (page_mapcount(page) != 1)
		goto out;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, 1))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
	if (nr_remaining) {
		putback_lru_pages(&migratepages);
		isolated = 0;
	} else
		count_vm_numa_event(NUMA_PAGE_MIGRATE);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Migrates a THP to a given target node. page must be locked and is unlocked
 * before returning.
 */
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
				struct vm_area_struct *vma,
				pmd_t *pmd, pmd_t entry,
				unsigned long address,
				struct page *page, int node)
{
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated = 0;
	struct page *new_page = NULL;
	struct mem_cgroup *memcg = NULL;
	int page_lru = page_is_file_cache(page);

	/*
	 * Don't migrate pages that are mapped in multiple processes.
	 * TODO: Handle false sharing detection instead of this hammer
	 */
	if (page_mapcount(page) != 1)
		goto out_dropref;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
		goto out_dropref;

	new_page = alloc_pages_node(node,
		(GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
	if (!new_page)
		goto out_fail;

	page_nid_xchg_last(new_page, page_nid_last(page));

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated) {
		put_page(new_page);
		goto out_fail;
	}

	/* Prepare a page as a migration target */
	__set_page_locked(new_page);
	SetPageSwapBacked(new_page);

	/* anon mapping, we can simply copy page->mapping to the new page: */
	new_page->mapping = page->mapping;
	new_page->index = page->index;
	migrate_page_copy(new_page, page);
	WARN_ON(PageLRU(new_page));

	/* Recheck the target PMD */
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_same(*pmd, entry))) {
		spin_unlock(&mm->page_table_lock);

		/* Reverse changes made by migrate_page_copy() */
		if (TestClearPageActive(new_page))
			SetPageActive(page);
		if (TestClearPageUnevictable(new_page))
			SetPageUnevictable(page);
		mlock_migrate_page(page, new_page);

		unlock_page(new_page);
		put_page(new_page);		/* Free it */

		/* Retake the caller's reference and putback on LRU */
		get_page(page);
		putback_lru_page(page);
		mod_zone_page_state(page_zone(page),
			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
		goto out_fail;
	}

	/*
	 * Traditional migration needs to prepare the memcg charge
	 * transaction early to prevent the old page from being
	 * uncharged when installing migration entries. Here we can
	 * save the potential rollback and start the charge transfer
	 * only when migration is already known to end successfully.
	 */
	mem_cgroup_prepare_migration(page, new_page, &memcg);

	entry = mk_pmd(new_page, vma->vm_page_prot);
	entry = pmd_mknonnuma(entry);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	entry = pmd_mkhuge(entry);

	pmdp_clear_flush(vma, haddr, pmd);
	set_pmd_at(mm, haddr, pmd, entry);
	page_add_new_anon_rmap(new_page, vma, haddr);
	update_mmu_cache_pmd(vma, address, &entry);
	page_remove_rmap(page);
	/*
	 * Finish the charge transaction under the page table lock to
	 * prevent split_huge_page() from dividing up the charge
	 * before it's fully transferred to the new page.
	 */
	mem_cgroup_end_migration(memcg, page, new_page, true);
	spin_unlock(&mm->page_table_lock);

	unlock_page(new_page);
	unlock_page(page);
	put_page(page);			/* Drop the rmap reference */
	put_page(page);			/* Drop the LRU isolation reference */

	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

	mod_zone_page_state(page_zone(page),
			NR_ISOLATED_ANON + page_lru,
			-HPAGE_PMD_NR);
	return isolated;

out_fail:
	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
	entry = pmd_mknonnuma(entry);
	set_pmd_at(mm, haddr, pmd, entry);
	update_mmu_cache_pmd(vma, address, &entry);

	unlock_page(page);
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* CONFIG_NUMA */