 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
+	flags &= gfp_allowed_mask;
+
+	if (flags & __GFP_WAIT)
+		local_irq_enable();
+
 	flags |= s->allocflags;
@@ ... @@
 		/*
 		 * Try a lower order alloc if possible
 		 */
 		page = alloc_slab_page(flags, node, oo);
-		if (!page)
-			return NULL;
-		stat(s, ORDER_FALLBACK);
+		if (page)
+			stat(s, ORDER_FALLBACK);
 	}
+
+	if (flags & __GFP_WAIT)
+		local_irq_disable();
+
+	if (!page)
+		return NULL;
+
 	if (kmemcheck_enabled
 		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
@@ ... @@
 		goto load_freelist;
 	}
-	gfpflags &= gfp_allowed_mask;
-	if (gfpflags & __GFP_WAIT)
-		local_irq_enable();
-
 	page = new_slab(s, gfpflags, node);
-	if (gfpflags & __GFP_WAIT)
-		local_irq_disable();
-
 	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		stat(s, ALLOC_SLAB);