mm: convert free_transhuge_page() to folio_undo_large_rmappable()
authorMatthew Wilcox (Oracle) <willy@infradead.org>
Wed, 16 Aug 2023 15:11:52 +0000 (16:11 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 21 Aug 2023 21:28:43 +0000 (14:28 -0700)
Indirect calls are expensive, thanks to Spectre.  Test for
TRANSHUGE_PAGE_DTOR and destroy the folio appropriately.  Move the
free_compound_page() call into destroy_large_folio() to simplify later
patches.

Link: https://lkml.kernel.org/r/20230816151201.3655946-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
include/linux/mm.h
mm/huge_memory.c
mm/internal.h
mm/page_alloc.c

index e718dbe..ceda26a 100644 (file)
@@ -141,8 +141,6 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);
 
 void prep_transhuge_page(struct page *page);
-void free_transhuge_page(struct page *page);
-
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
index 55eb278..0d14e20 100644 (file)
@@ -1253,9 +1253,7 @@ enum compound_dtor_id {
 #ifdef CONFIG_HUGETLB_PAGE
        HUGETLB_PAGE_DTOR,
 #endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        TRANSHUGE_PAGE_DTOR,
-#endif
        NR_COMPOUND_DTORS,
 };
 
index 154c210..b334566 100644 (file)
@@ -2776,10 +2776,9 @@ out:
        return ret;
 }
 
-void free_transhuge_page(struct page *page)
+void folio_undo_large_rmappable(struct folio *folio)
 {
-       struct folio *folio = (struct folio *)page;
-       struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+       struct deferred_split *ds_queue;
        unsigned long flags;
 
        /*
@@ -2787,15 +2786,16 @@ void free_transhuge_page(struct page *page)
         * deferred_list. If folio is not in deferred_list, it's safe
         * to check without acquiring the split_queue_lock.
         */
-       if (data_race(!list_empty(&folio->_deferred_list))) {
-               spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-               if (!list_empty(&folio->_deferred_list)) {
-                       ds_queue->split_queue_len--;
-                       list_del(&folio->_deferred_list);
-               }
-               spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
+       if (data_race(list_empty(&folio->_deferred_list)))
+               return;
+
+       ds_queue = get_deferred_split_queue(folio);
+       spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+       if (!list_empty(&folio->_deferred_list)) {
+               ds_queue->split_queue_len--;
+               list_del(&folio->_deferred_list);
        }
-       free_compound_page(page);
+       spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
 
 void deferred_split_folio(struct folio *folio)
index d99ffb4..30bbfca 100644 (file)
@@ -413,6 +413,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
+void folio_undo_large_rmappable(struct folio *folio);
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
        struct folio *folio = (struct folio *)page;
index 30dc444..4047b58 100644 (file)
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
        [NULL_COMPOUND_DTOR] = NULL,
        [COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
-#endif
 };
 
 int min_free_kbytes = 1024;
@@ -614,6 +611,12 @@ void destroy_large_folio(struct folio *folio)
                return;
        }
 
+       if (folio_test_transhuge(folio) && dtor == TRANSHUGE_PAGE_DTOR) {
+               folio_undo_large_rmappable(folio);
+               free_compound_page(&folio->page);
+               return;
+       }
+
        VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
        compound_page_dtors[dtor](&folio->page);
 }