From 5375336c8c42a343c3b440b6f1e21c65e7b174b9 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Fri, 17 Jun 2022 18:50:17 +0100
Subject: [PATCH] mm: convert destroy_compound_page() to destroy_large_folio()

All callers now have a folio, so push the folio->page conversion down to
this function.

[akpm@linux-foundation.org: uninline destroy_large_folio() to fix build issue]
Link: https://lkml.kernel.org/r/20220617175020.717127-20-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Signed-off-by: Andrew Morton
---
 include/linux/mm.h | 6 +-----
 mm/page_alloc.c    | 8 ++++++++
 mm/swap.c          | 2 +-
 mm/vmscan.c        | 4 ++--
 4 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3fb49ae..9cc02a7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -892,11 +892,7 @@ static inline void set_compound_page_dtor(struct page *page,
 	page[1].compound_dtor = compound_dtor;
 }
 
-static inline void destroy_compound_page(struct page *page)
-{
-	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
-	compound_page_dtors[page[1].compound_dtor](page);
-}
+void destroy_large_folio(struct folio *folio);
 
 static inline int head_compound_pincount(struct page *head)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2484691..52fd92b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -744,6 +744,14 @@ void prep_compound_page(struct page *page, unsigned int order)
 	prep_compound_head(page, order);
 }
 
+void destroy_large_folio(struct folio *folio)
+{
+	enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor;
+
+	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
+	compound_page_dtors[dtor](&folio->page);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
diff --git a/mm/swap.c b/mm/swap.c
index 5f6caa6..1f563d8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -115,7 +115,7 @@ static void __folio_put_large(struct folio *folio)
 	 */
 	if (!folio_test_hugetlb(folio))
 		__page_cache_release(folio);
-	destroy_compound_page(&folio->page);
+	destroy_large_folio(folio);
 }
 
 void __folio_put(struct folio *folio)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e7d3db6..e660d72 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1979,7 +1979,7 @@ free_it:
 		 * appear not as the counts should be low
 		 */
 		if (unlikely(folio_test_large(folio)))
-			destroy_compound_page(&folio->page);
+			destroy_large_folio(folio);
 		else
 			list_add(&folio->lru, &free_pages);
 		continue;
@@ -2348,7 +2348,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
 
 		if (unlikely(folio_test_large(folio))) {
 			spin_unlock_irq(&lruvec->lru_lock);
-			destroy_compound_page(&folio->page);
+			destroy_large_folio(folio);
 			spin_lock_irq(&lruvec->lru_lock);
 		} else
 			list_add(&folio->lru, &folios_to_free);
-- 
2.7.4
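
Note (not part of the patch): below is a minimal, standalone C sketch of the destructor-table dispatch that the new destroy_large_folio() performs. The struct page/struct folio layouts, the stub destructors, and main() are hypothetical stand-ins; only the names destroy_large_folio, compound_dtor, compound_page_dtors, and NR_COMPOUND_DTORS come from the patch itself, and the real kernel helpers (folio_page(), VM_BUG_ON_FOLIO()) are approximated with plain field access and assert().

/* sketch.c: illustrative only, not kernel code */
#include <assert.h>
#include <stdio.h>

enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
	HUGETLB_PAGE_DTOR,
	NR_COMPOUND_DTORS,
};

/* Hypothetical stand-in: the dtor id lives in the first tail page. */
struct page {
	enum compound_dtor_id compound_dtor;
};

struct folio {
	struct page page;	/* head page */
	struct page tail;	/* stand-in for folio_page(folio, 1) */
};

static void free_compound_page_stub(struct page *page)
{
	printf("freeing ordinary compound page %p\n", (void *)page);
}

static void free_huge_page_stub(struct page *page)
{
	printf("freeing hugetlb page %p\n", (void *)page);
}

/* Table indexed by the dtor id recorded when the compound page was prepped. */
static void (*const compound_page_dtors[NR_COMPOUND_DTORS])(struct page *) = {
	[COMPOUND_PAGE_DTOR]	= free_compound_page_stub,
	[HUGETLB_PAGE_DTOR]	= free_huge_page_stub,
};

/* Mirrors the new helper: read the dtor id, sanity-check it, dispatch. */
static void destroy_large_folio(struct folio *folio)
{
	enum compound_dtor_id dtor = folio->tail.compound_dtor;

	assert(dtor < NR_COMPOUND_DTORS);	/* VM_BUG_ON_FOLIO() in the patch */
	compound_page_dtors[dtor](&folio->page);
}

int main(void)
{
	struct folio folio = { .tail.compound_dtor = COMPOUND_PAGE_DTOR };

	/* Callers pass the folio itself; the &folio->page conversion
	 * now happens inside destroy_large_folio(), as the patch says. */
	destroy_large_folio(&folio);
	return 0;
}

The point of the conversion is visible in main(): every call site that used to write destroy_compound_page(&folio->page) now just passes the folio, and the single folio-to-page step is kept in one place next to the destructor lookup.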