mm: convert destroy_compound_page() to destroy_large_folio()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 17 Jun 2022 17:50:17 +0000 (18:50 +0100)
committer: akpm <akpm@linux-foundation.org>
Mon, 4 Jul 2022 01:08:48 +0000 (18:08 -0700)
All callers now have a folio, so push the folio->page conversion
down to this function.

[akpm@linux-foundation.org: uninline destroy_large_folio() to fix build issue]
Link: https://lkml.kernel.org/r/20220617175020.717127-20-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mm.h
mm/page_alloc.c
mm/swap.c
mm/vmscan.c

index 3fb49ae..9cc02a7 100644 (file)
@@ -892,11 +892,7 @@ static inline void set_compound_page_dtor(struct page *page,
        page[1].compound_dtor = compound_dtor;
 }
 
-static inline void destroy_compound_page(struct page *page)
-{
-       VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
-       compound_page_dtors[page[1].compound_dtor](page);
-}
+void destroy_large_folio(struct folio *folio);
 
 static inline int head_compound_pincount(struct page *head)
 {
index 2484691..52fd92b 100644 (file)
@@ -744,6 +744,14 @@ void prep_compound_page(struct page *page, unsigned int order)
        prep_compound_head(page, order);
 }
 
+void destroy_large_folio(struct folio *folio)
+{
+       enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;
+
+       VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
+       compound_page_dtors[dtor](&folio->page);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
index 5f6caa6..1f563d8 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -115,7 +115,7 @@ static void __folio_put_large(struct folio *folio)
         */
        if (!folio_test_hugetlb(folio))
                __page_cache_release(folio);
-       destroy_compound_page(&folio->page);
+       destroy_large_folio(folio);
 }
 
 void __folio_put(struct folio *folio)
index e7d3db6..e660d72 100644 (file)
@@ -1979,7 +1979,7 @@ free_it:
                 * appear not as the counts should be low
                 */
                if (unlikely(folio_test_large(folio)))
-                       destroy_compound_page(&folio->page);
+                       destroy_large_folio(folio);
                else
                        list_add(&folio->lru, &free_pages);
                continue;
@@ -2348,7 +2348,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 
                        if (unlikely(folio_test_large(folio))) {
                                spin_unlock_irq(&lruvec->lru_lock);
-                               destroy_compound_page(&folio->page);
+                               destroy_large_folio(folio);
                                spin_lock_irq(&lruvec->lru_lock);
                        } else
                                list_add(&folio->lru, &folios_to_free);