mm/slub: Convert __free_slab() to use struct slab
author		Vlastimil Babka <vbabka@suse.cz>
		Fri, 29 Oct 2021 10:18:24 +0000 (12:18 +0200)
committer	Vlastimil Babka <vbabka@suse.cz>
		Thu, 6 Jan 2022 11:26:01 +0000 (12:26 +0100)
__free_slab() is on the boundary of distinguishing struct slab and
struct page, so start with struct slab, but convert to folio for
working with flags, and use folio_page() to call functions that still
require struct page.
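
For reference, a rough sketch of the cast helpers this relies on, as
introduced earlier in this series (the authoritative definitions live
in mm/slab.h and include/linux/page-flags.h). struct slab, struct folio
and struct page overlay the same memory, so the conversions compile to
no-ops:

    #define slab_folio(s)	(_Generic((s),			\
    	const struct slab *:	(const struct folio *)s,	\
    	struct slab *:		(struct folio *)s))

    #define page_slab(p)	(_Generic((p),			\
    	const struct page *:	(const struct slab *)(p),	\
    	struct page *:		(struct slab *)(p)))

    /* page 0 of a folio is its head page */
    #define folio_page(folio, n)	nth_page(&(folio)->page, n)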

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
mm/slub.c

index 65cbdeae7edba6c5592f808b78e0a0f15822532e..4d64c5b4262907b7c6b6a395bc087f5e46eaef73 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2005,35 +2005,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 }
 
-static void __free_slab(struct kmem_cache *s, struct page *page)
+static void __free_slab(struct kmem_cache *s, struct slab *slab)
 {
-       int order = compound_order(page);
+       struct folio *folio = slab_folio(slab);
+       int order = folio_order(folio);
        int pages = 1 << order;
 
        if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
                void *p;
 
-               slab_pad_check(s, page);
-               for_each_object(p, s, page_address(page),
-                                               page->objects)
-                       check_object(s, page, p, SLUB_RED_INACTIVE);
+               slab_pad_check(s, folio_page(folio, 0));
+               for_each_object(p, s, slab_address(slab), slab->objects)
+                       check_object(s, folio_page(folio, 0), p, SLUB_RED_INACTIVE);
        }
 
-       __ClearPageSlabPfmemalloc(page);
-       __ClearPageSlab(page);
-       /* In union with page->mapping where page allocator expects NULL */
-       page->slab_cache = NULL;
+       __slab_clear_pfmemalloc(slab);
+       __folio_clear_slab(folio);
+       folio->mapping = NULL;
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
-       unaccount_slab(page_slab(page), order, s);
-       __free_pages(page, order);
+       unaccount_slab(slab, order, s);
+       __free_pages(folio_page(folio, 0), order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
 {
        struct page *page = container_of(h, struct page, rcu_head);
 
-       __free_slab(page->slab_cache, page);
+       __free_slab(page->slab_cache, page_slab(page));
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
@@ -2041,7 +2040,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
        if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
                call_rcu(&page->rcu_head, rcu_free_slab);
        } else
-               __free_slab(s, page);
+               __free_slab(s, page_slab(page));
 }
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
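
The __slab_clear_pfmemalloc() and slab_address() wrappers used above
come from earlier in the series; sketched here under the assumption
(matching the existing PageSlabPfmemalloc code) that the pfmemalloc
state is carried in the head page's PG_active bit:

    /* Sketch; see mm/slab.h in this series for the real definitions. */
    static inline void __slab_clear_pfmemalloc(struct slab *slab)
    {
    	/* SlabPfmemalloc aliases PG_active on the slab's head page */
    	__folio_clear_active(slab_folio(slab));
    }

    static inline void *slab_address(const struct slab *slab)
    {
    	return folio_address(slab_folio(slab));
    }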