From 3f2b77e35a4fc3c83132a1a1a2fc7a2c803a2514 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Tue, 11 May 2021 16:37:51 +0200
Subject: [PATCH] mm, slub: validate slab from partial list or page allocator
 before making it cpu slab

When we obtain a new slab page from the node partial list or the page
allocator, we assign it to kmem_cache_cpu, perform some checks, and if
they fail, we undo the assignment.

In order to allow doing the checks without irqs disabled, restructure the
code so that the checks are done first, and the kmem_cache_cpu.page
assignment only after they pass.

Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 7798ba1..a5e974d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2802,10 +2802,8 @@ new_objects:
 	lockdep_assert_irqs_disabled();
 
 	freelist = get_partial(s, gfpflags, node, &page);
-	if (freelist) {
-		c->page = page;
+	if (freelist)
 		goto check_new_page;
-	}
 
 	local_irq_restore(flags);
 	put_cpu_ptr(s->cpu_slab);
@@ -2818,9 +2816,6 @@ new_objects:
 	}
 
 	local_irq_save(flags);
-	if (c->page)
-		flush_slab(s, c);
-
 	/*
 	 * No other reference to the page yet so we can
 	 * muck around with it freely without cmpxchg
@@ -2829,14 +2824,12 @@ new_objects:
 	page->freelist = NULL;
 
 	stat(s, ALLOC_SLAB);
-	c->page = page;
 
 check_new_page:
 
 	if (kmem_cache_debug(s)) {
 		if (!alloc_debug_processing(s, page, freelist, addr)) {
 			/* Slab failed checks. Next slab needed */
-			c->page = NULL;
 			local_irq_restore(flags);
 			goto new_slab;
 		} else {
@@ -2855,10 +2848,18 @@ check_new_page:
 	 */
 		goto return_single;
 
+	if (unlikely(c->page))
+		flush_slab(s, c);
+	c->page = page;
+
 	goto load_freelist;
 
 return_single:
 
+	if (unlikely(c->page))
+		flush_slab(s, c);
+	c->page = page;
+
 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
 	local_irq_restore(flags);
 	return freelist;
-- 
2.7.4
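
Note (not part of the patch): below is a minimal userspace sketch of the
ordering change the commit message describes, i.e. validate the newly
obtained page first and publish it to the per-CPU structure only after the
checks pass, flushing any page assigned in the meantime. All names here
(struct cpu_slot, struct page_stub, validate_page(), flush_slot(),
install_page()) are invented stand-ins for illustration, not kernel APIs.

/*
 * Illustration only: simplified model of "check first, assign c->page
 * only after the checks pass". Hypothetical names, not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page_stub { void *freelist; };          /* stand-in for struct page */
struct cpu_slot  { struct page_stub *page; };  /* stand-in for kmem_cache_cpu */

/* Stand-in for the debug checks (alloc_debug_processing() in the patch). */
static bool validate_page(struct page_stub *page)
{
	return page->freelist != NULL;
}

/* Stand-in for flush_slab(): give up the currently assigned page. */
static void flush_slot(struct cpu_slot *c)
{
	printf("flushing old page %p\n", (void *)c->page);
	c->page = NULL;
}

/*
 * Old order: assign first, validate, undo the assignment on failure.
 * New order (below): validate first, assign only after the checks pass,
 * flushing any page that is already assigned (the unlikely(c->page) case).
 */
static bool install_page(struct cpu_slot *c, struct page_stub *page)
{
	if (!validate_page(page))
		return false;	/* page never became c->page, nothing to undo */

	if (c->page)
		flush_slot(c);
	c->page = page;
	return true;
}

int main(void)
{
	struct cpu_slot c = { .page = NULL };
	struct page_stub bad = { .freelist = NULL };
	int dummy;
	struct page_stub good = { .freelist = &dummy };

	printf("bad page installed:  %d\n", install_page(&c, &bad));	/* 0 */
	printf("good page installed: %d\n", install_page(&c, &good));	/* 1 */
	return 0;
}

Because the failure path never touches c->page in the new order, there is
nothing to undo there, which is what later allows the checks to run without
irqs disabled, as the commit message states.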