mm/mempolicy: Use vma_alloc_folio() in new_page()
author     Matthew Wilcox (Oracle) <willy@infradead.org>
           Mon, 4 Apr 2022 19:23:39 +0000 (15:23 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Thu, 7 Apr 2022 13:43:41 +0000 (09:43 -0400)
Simplify new_page() by unifying the THP and base page cases, and
handle orders other than 0 and HPAGE_PMD_ORDER correctly.
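
For reference, vma_alloc_folio() was added earlier in this series and
rolls the order and the THP-placement hint into a single call, setting
up compound metadata itself; that is what allows the explicit
prep_transhuge_page() call to be dropped below. A rough sketch of its
shape (hedged; see the actual definition in the tree this applies to):

	struct folio *vma_alloc_folio(gfp_t gfp, int order,
			struct vm_area_struct *vma, unsigned long addr,
			bool hugepage)
	{
		struct folio *folio;

		/* Policy-aware allocation at the requested order. */
		folio = alloc_pages_vma(gfp, order, vma, addr,
				numa_node_id(), hugepage);
		/* Large folios need compound/THP metadata set up. */
		if (folio && order > 1)
			prep_transhuge_page(&folio->page);
		return folio;
	}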

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ec15f4f..649bd3b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  */
 static struct page *new_page(struct page *page, unsigned long start)
 {
+       struct folio *dst, *src = page_folio(page);
        struct vm_area_struct *vma;
        unsigned long address;
+       gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
        vma = find_vma(current->mm, start);
        while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
                vma = vma->vm_next;
        }
 
-       if (PageHuge(page)) {
-               return alloc_huge_page_vma(page_hstate(compound_head(page)),
+       if (folio_test_hugetlb(src))
+               return alloc_huge_page_vma(page_hstate(&src->page),
                                vma, address);
-       } else if (PageTransHuge(page)) {
-               struct page *thp;
 
-               thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-                                        HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       }
+       if (folio_test_large(src))
+               gfp = GFP_TRANSHUGE;
+
        /*
-        * if !vma, alloc_page_vma() will use task or system default policy
+        * if !vma, vma_alloc_folio() will use task or system default policy
         */
-       return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
-                       vma, address);
+       dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+                       folio_test_large(src));
+       return &dst->page;
 }
 #else
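
A note on the failure path: new_page() has always returned NULL when
allocation fails, and that still holds here, on the assumption (which
the struct folio layout guarantees, page being its first member) that
&dst->page is NULL whenever dst is NULL. For context, a sketch of the
caller side, matching how do_mbind() hands new_page() to
migrate_pages() as its allocation callback (illustrative, not verbatim
kernel code):

	/* new_page() serves as the new_page_t callback; a NULL return
	 * causes migration of that particular page to fail. */
	nr_failed = migrate_pages(&pagelist, new_page, NULL, start,
			MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);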