hugetlb: clear flags in tail pages that will be freed individually
Author:     Mike Kravetz <mike.kravetz@oracle.com>
Date:       Tue, 22 Aug 2023 22:30:43 +0000 (15:30 -0700)
Committer:  Andrew Morton <akpm@linux-foundation.org>
CommitDate: Thu, 24 Aug 2023 23:20:15 +0000 (16:20 -0700)
hugetlb manually creates and destroys compound pages.  As such it makes
assumptions about struct page layout.  Commit ebc1baf5c9b4 ("mm: free up a
word in the first tail page") breaks hugetlb.  The following will fix the
breakage.

Link: https://lkml.kernel.org/r/20230822231741.GC4509@monkey
Fixes: ebc1baf5c9b4 ("mm: free up a word in the first tail page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/hugetlb.c

index a82c310..cbc2582 100644 (file)
@@ -1484,6 +1484,7 @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
 
        for (i = 1; i < nr_pages; i++) {
                p = folio_page(folio, i);
+               p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
                p->mapping = NULL;
                clear_compound_head(p);
                if (!demote)
@@ -1702,8 +1703,6 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 static void __update_and_free_hugetlb_folio(struct hstate *h,
                                                struct folio *folio)
 {
-       int i;
-       struct page *subpage;
        bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
 
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1745,14 +1744,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
                spin_unlock_irq(&hugetlb_lock);
        }
 
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               subpage = folio_page(folio, i);
-               subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
-                               1 << PG_referenced | 1 << PG_dirty |
-                               1 << PG_active | 1 << PG_private |
-                               1 << PG_writeback);
-       }
-
        /*
         * Non-gigantic pages demoted from CMA allocated gigantic pages
         * need to be given back to CMA in free_gigantic_folio.