mm: call free_huge_page() directly
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 16 Aug 2023 15:11:50 +0000 (16:11 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 21 Aug 2023 21:28:43 +0000 (14:28 -0700)
Indirect calls are expensive, thanks to Spectre.  Call free_huge_page()
directly if the folio belongs to hugetlb.

Link: https://lkml.kernel.org/r/20230816151201.3655946-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/hugetlb.h
mm/page_alloc.c

index 0a393bc02f25b4203c1f8694298abf522a10eb90..5a1dfaffbd8064c2cb9740d4c4138ecd438f3ead 100644 (file)
@@ -26,6 +26,8 @@ typedef struct { unsigned long pd; } hugepd_t;
 #define __hugepd(x) ((hugepd_t) { (x) })
 #endif
 
+void free_huge_page(struct page *page);
+
 #ifdef CONFIG_HUGETLB_PAGE
 
 #include <linux/mempolicy.h>
@@ -165,7 +167,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                bool *migratable_cleared);
 void folio_putback_active_hugetlb(struct folio *folio);
 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
-void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
index 986b56db96b5bf729590909342fc1b1e9265516d..7448485933603835ef618d4b693eebfdcd148a3b 100644 (file)
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
        [NULL_COMPOUND_DTOR] = NULL,
        [COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_HUGETLB_PAGE
-       [HUGETLB_PAGE_DTOR] = free_huge_page,
-#endif
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
 #endif
@@ -612,6 +609,11 @@ void destroy_large_folio(struct folio *folio)
 {
        enum compound_dtor_id dtor = folio->_folio_dtor;
 
+       if (folio_test_hugetlb(folio)) {
+               free_huge_page(&folio->page);
+               return;
+       }
+
        VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
        compound_page_dtors[dtor](&folio->page);
 }