mm/memory_hotplug: remove a wrapper for alloc_migration_target()
author Joonsoo Kim <iamjoonsoo.kim@lge.com>
Sat, 17 Oct 2020 23:14:00 +0000 (16:14 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 18 Oct 2020 16:27:09 +0000 (09:27 -0700)
To calculate the correct node to migrate a page to during hotplug, we
need to check the node id of the page.  A wrapper for
alloc_migration_target() exists for this purpose.

However, as Vlastimil points out, all migration source pages come from
a single node.  In that case, we don't need to check the node id for
each page, and we don't need to re-set the target nodemask for each
page via the wrapper.  Set up the migration_target_control once and use
it for all pages.
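
As an illustration, here is a minimal userspace C sketch of the
target-node fallback logic (the bitmask helper below is hypothetical,
standing in for the kernel's nodemask API; this is not kernel code):

    #include <stdio.h>

    /*
     * Stand-in for node_clear()/nodes_empty()/node_set() on a nodemask:
     * prefer any node other than the source node, but fall back to the
     * source node when it is the only node with memory.
     */
    static unsigned long pick_target_mask(unsigned long online_mask, int src_nid)
    {
            unsigned long nmask = online_mask;

            nmask &= ~(1UL << src_nid);             /* node_clear(src_nid, nmask) */
            if (!nmask)                             /* nodes_empty(nmask) */
                    nmask |= 1UL << src_nid;        /* node_set(src_nid, nmask) */
            return nmask;
    }

    int main(void)
    {
            /* Nodes 0 and 1 online: offlining node 0 targets node 1. */
            printf("%#lx\n", pick_target_mask(0x3, 0));     /* prints 0x2 */
            /* Only node 0 online: reuse node 0 itself. */
            printf("%#lx\n", pick_target_mask(0x1, 0));     /* prints 0x1 */
            return 0;
    }

With the wrapper gone, this mask is computed once per offline operation
rather than once per page; migrate_pages() passes its 'private'
argument straight through to the allocation callback, so &mtc can be
handed to alloc_migration_target() directly.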

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Roman Gushchin <guro@fb.com>
Link: http://lkml.kernel.org/r/1594622517-20681-10-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memory_hotplug.c

index 6f20357..b44d4c7 100644
@@ -1290,27 +1290,6 @@ found:
        return 0;
 }
 
-static struct page *new_node_page(struct page *page, unsigned long private)
-{
-       nodemask_t nmask = node_states[N_MEMORY];
-       struct migration_target_control mtc = {
-               .nid = page_to_nid(page),
-               .nmask = &nmask,
-               .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
-       };
-
-       /*
-        * try to allocate from a different node but reuse this node if there
-        * are no other online nodes to be used (e.g. we are offlining a part
-        * of the only existing node)
-        */
-       node_clear(mtc.nid, nmask);
-       if (nodes_empty(nmask))
-               node_set(mtc.nid, nmask);
-
-       return alloc_migration_target(page, (unsigned long)&mtc);
-}
-
 static int
 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -1370,9 +1349,28 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                put_page(page);
        }
        if (!list_empty(&source)) {
-               /* Allocate a new page from the nearest neighbor node */
-               ret = migrate_pages(&source, new_node_page, NULL, 0,
-                                       MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
+               nodemask_t nmask = node_states[N_MEMORY];
+               struct migration_target_control mtc = {
+                       .nmask = &nmask,
+                       .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+               };
+
+               /*
+                * We have checked that the migration range is on a single zone,
+                * so we can use the nid of the first page for all the others.
+                */
+               mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
+
+               /*
+                * try to allocate from a different node but reuse this node
+                * if there are no other online nodes to be used (e.g. we are
+                * offlining a part of the only existing node)
+                */
+               node_clear(mtc.nid, nmask);
+               if (nodes_empty(nmask))
+                       node_set(mtc.nid, nmask);
+               ret = migrate_pages(&source, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
                if (ret) {
                        list_for_each_entry(page, &source, lru) {
                                pr_warn("migrating pfn %lx failed ret:%d ",