mm/huge_memory: convert do_huge_pmd_anonymous_page() to use vma_alloc_folio()
author    Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 13 May 2022 03:23:01 +0000 (20:23 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 13 May 2022 14:20:14 +0000 (07:20 -0700)
Remove the use of the old alloc_hugepage_vma() API, eliminating a call to
prep_transhuge_page().

Link: https://lkml.kernel.org/r/20220504182857.4013401-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index 6f37f77..c37c870 100644
@@ -726,7 +726,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        gfp_t gfp;
-       struct page *page;
+       struct folio *folio;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 
        if (!transhuge_vma_suitable(vma, haddr))
@@ -775,13 +775,12 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                return ret;
        }
        gfp = vma_thp_gfp_mask(vma);
-       page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
-       if (unlikely(!page)) {
+       folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
+       if (unlikely(!folio)) {
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
-       prep_transhuge_page(page);
-       return __do_huge_pmd_anonymous_page(vmf, page, gfp);
+       return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
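
For context, a minimal sketch of the allocation pattern before and after this
conversion. It assumes the 5.18-era signatures (vma_alloc_folio()'s final bool
argument is the hugepage hint), and the two wrapper functions are hypothetical,
written only to show the shape of the call; at this point in the tree
vma_alloc_folio() prepares a large folio for transhuge use internally, which is
why the caller's explicit prep_transhuge_page() call can be dropped.

	/* Sketch only: contrasts the old page-based pattern with the folio one. */
	#include <linux/gfp.h>
	#include <linux/huge_mm.h>
	#include <linux/mm.h>

	/* Old pattern: allocate a compound page, then prepare it as a THP. */
	static struct page *thp_alloc_old(gfp_t gfp, struct vm_area_struct *vma,
					  unsigned long haddr)
	{
		struct page *page = alloc_hugepage_vma(gfp, vma, haddr,
						       HPAGE_PMD_ORDER);

		if (unlikely(!page))
			return NULL;
		prep_transhuge_page(page);
		return page;
	}

	/* New pattern: one call returns a folio already set up as a THP. */
	static struct folio *thp_alloc_new(gfp_t gfp, struct vm_area_struct *vma,
					   unsigned long haddr)
	{
		return vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	}

Note that __do_huge_pmd_anonymous_page() still takes a struct page, so the
converted caller passes the folio's head page via &folio->page.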