mm/mempolicy: use a standard migration target allocation callback
author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	Wed, 12 Aug 2020 01:37:28 +0000 (18:37 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>	Wed, 12 Aug 2020 17:58:02 +0000 (10:58 -0700)
There is now a well-defined migration target allocation callback, alloc_migration_target().  Use it for the NUMA migration paths in mempolicy.c and migrate.c, and remove the now-unused alloc_new_node_page().

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Roman Gushchin <guro@fb.com>
Link: http://lkml.kernel.org/r/1594622517-20681-7-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
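
For context, the call pattern this patch converges on is: fill a struct migration_target_control with the target node and gfp mask, then hand the common alloc_migration_target() callback to migrate_pages(); the callback handles hugetlb, THP and base pages internally, as the removed alloc_new_node_page() did.  Below is a minimal sketch of that pattern, mirroring the do_move_pages_to_node() hunk in this diff; the wrapper name is illustrative, while the kernel identifiers are those used by this series (~v5.9):

	static int migrate_list_to_node(struct list_head *pagelist, int node)
	{
		/* target node and allocation constraints for the common callback */
		struct migration_target_control mtc = {
			.nid = node,
			.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
		};
		int err;

		/* mtc is passed to alloc_migration_target() via the 'private' argument */
		err = migrate_pages(pagelist, alloc_migration_target, NULL,
				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(pagelist);
		return err;
	}
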
mm/internal.h
mm/mempolicy.c
mm/migrate.c

diff --git a/mm/internal.h b/mm/internal.h
index f725aa8..d11a9a8 100644
@@ -613,7 +613,6 @@ static inline bool is_migrate_highatomic_page(struct page *page)
 }
 
 void setup_zone_pageset(struct zone *zone);
-extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
 
 struct migration_target_control {
        int nid;                /* preferred node id */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9ae2b70..afaa09f 100644
@@ -1065,29 +1065,6 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
        return 0;
 }
 
-/* page allocation callback for NUMA node migration */
-struct page *alloc_new_node_page(struct page *page, unsigned long node)
-{
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(compound_head(page));
-               gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
-
-               return alloc_huge_page_nodemask(h, node, NULL, gfp_mask);
-       } else if (PageTransHuge(page)) {
-               struct page *thp;
-
-               thp = alloc_pages_node(node,
-                       (GFP_TRANSHUGE | __GFP_THISNODE),
-                       HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       } else
-               return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
-                                                   __GFP_THISNODE, 0);
-}
-
 /*
  * Migrate pages from one node to a target node.
  * Returns error or the number of pages not migrated.
@@ -1098,6 +1075,10 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;
+       struct migration_target_control mtc = {
+               .nid = dest,
+               .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+       };
 
        nodes_clear(nmask);
        node_set(source, nmask);
@@ -1112,8 +1093,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 
        if (!list_empty(&pagelist)) {
-               err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
-                                       MIGRATE_SYNC, MR_SYSCALL);
+               err = migrate_pages(&pagelist, alloc_migration_target, NULL,
+                               (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
                if (err)
                        putback_movable_pages(&pagelist);
        }
diff --git a/mm/migrate.c b/mm/migrate.c
index 48b1f14..5053439 100644
@@ -1598,9 +1598,13 @@ static int do_move_pages_to_node(struct mm_struct *mm,
                struct list_head *pagelist, int node)
 {
        int err;
+       struct migration_target_control mtc = {
+               .nid = node,
+               .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+       };
 
-       err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
-                       MIGRATE_SYNC, MR_SYSCALL);
+       err = migrate_pages(pagelist, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
        if (err)
                putback_movable_pages(pagelist);
        return err;