mm: huge_memory: convert madvise_free_huge_pmd to use a folio
Author:     Kefeng Wang <wangkefeng.wang@huawei.com>
AuthorDate: Wed, 7 Dec 2022 02:34:30 +0000 (10:34 +0800)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Thu, 19 Jan 2023 01:12:42 +0000 (17:12 -0800)
Using folios instead of pages removes several hidden calls to compound_head().
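
As an aside (a sketch, not part of this patch): each page-based helper
has to resolve the head page before it can do anything, so a run of
them repeats that lookup, whereas a folio is always a head page and the
folio_* calls skip it entirely.  The demo_trylock() wrapper below is
hypothetical; it mirrors how the page API is layered on the folio API
in current kernels (e.g. trylock_page() in include/linux/pagemap.h):

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/*
	 * Illustrative only: the page-based entry point pays for a
	 * compound_head() lookup (hidden inside page_folio()) on every
	 * call; a caller that already holds the folio does not.
	 */
	static bool demo_trylock(struct page *page)
	{
		struct folio *folio = page_folio(page);	/* compound_head() */

		return folio_trylock(folio);	/* no further head lookup */
	}

Resolving the folio once, up front, is what lets the converted
madvise_free_huge_pmd() avoid the repeated head-page lookups that
trylock_page(), PageDirty() and friends would otherwise each perform.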

Link: https://lkml.kernel.org/r/20221207023431.151008-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 867f02e..3de266e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1603,7 +1603,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 {
        spinlock_t *ptl;
        pmd_t orig_pmd;
-       struct page *page;
+       struct folio *folio;
        struct mm_struct *mm = tlb->mm;
        bool ret = false;
 
@@ -1623,15 +1623,15 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                goto out;
        }
 
-       page = pmd_page(orig_pmd);
+       folio = pfn_folio(pmd_pfn(orig_pmd));
        /*
-        * If other processes are mapping this page, we couldn't discard
-        * the page unless they all do MADV_FREE so let's skip the page.
+        * If other processes are mapping this folio, we couldn't discard
+        * the folio unless they all do MADV_FREE so let's skip the folio.
         */
-       if (total_mapcount(page) != 1)
+       if (folio_mapcount(folio) != 1)
                goto out;
 
-       if (!trylock_page(page))
+       if (!folio_trylock(folio))
                goto out;
 
        /*
@@ -1639,17 +1639,17 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
         * will deactivate only them.
         */
        if (next - addr != HPAGE_PMD_SIZE) {
-               get_page(page);
+               folio_get(folio);
                spin_unlock(ptl);
-               split_huge_page(page);
-               unlock_page(page);
-               put_page(page);
+               split_folio(folio);
+               folio_unlock(folio);
+               folio_put(folio);
                goto out_unlocked;
        }
 
-       if (PageDirty(page))
-               ClearPageDirty(page);
-       unlock_page(page);
+       if (folio_test_dirty(folio))
+               folio_clear_dirty(folio);
+       folio_unlock(folio);
 
        if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
                pmdp_invalidate(vma, addr, pmd);
@@ -1660,7 +1660,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        }
 
-       mark_page_lazyfree(page);
+       mark_page_lazyfree(&folio->page);
        ret = true;
 out:
        spin_unlock(ptl);