From 57d437d2aa680f42d75cef45205834d5f605550a Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 9 May 2012 10:09:59 -0500
Subject: [PATCH] slub: pass page to node_match() instead of kmem_cache_cpu
 structure

Avoid passing the kmem_cache_cpu pointer to node_match. This makes the
node_match function more generic and easier to understand.

Acked-by: David Rientjes
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 6b60fc9..719509e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2050,10 +2050,10 @@ static void flush_all(struct kmem_cache *s)
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
+	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
 		return 0;
 #endif
 	return 1;
@@ -2226,7 +2226,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto new_slab;
 redo:
 
-	if (unlikely(!node_match(c, node))) {
+	if (unlikely(!node_match(page, node))) {
 		stat(s, ALLOC_NODE_MISMATCH);
 		deactivate_slab(s, page, c->freelist);
 		c->page = NULL;
@@ -2313,6 +2313,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
+	struct page *page;
 	unsigned long tid;
 
 	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2338,7 +2339,8 @@ redo:
 	barrier();
 
 	object = c->freelist;
-	if (unlikely(!object || !node_match(c, node)))
+	page = c->page;
+	if (unlikely(!object || !node_match(page, node)))
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
-- 
2.7.4
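
As a quick illustration of the resulting calling convention, here is a minimal, self-contained C sketch (user-space, not kernel code; the stub struct definitions, the page_to_nid() stand-in and the node ids are assumptions for the example only). It shows node_match() taking the page directly while the caller snapshots c->page once, mirroring the new slab_alloc() fast path:

/*
 * Simplified, standalone sketch of the refactor above, NOT kernel code.
 * The stub types, the page_to_nid() stand-in and the node ids used in
 * main() are assumptions made for illustration only.
 */
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct page { int nid; };	/* stub: NUMA node the page sits on */
struct kmem_cache_cpu { struct page *page; void **freelist; };

static int page_to_nid(const struct page *page)
{
	return page->nid;
}

/* After the patch: node_match() takes the page itself, so it no longer
 * needs to know anything about the per cpu structure. */
static inline int node_match(struct page *page, int node)
{
	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
		return 0;
	return 1;
}

int main(void)
{
	struct page p = { .nid = 0 };
	struct kmem_cache_cpu c = { .page = &p, .freelist = NULL };

	/* The caller snapshots c->page once and hands it on, mirroring
	 * the new fast path in slab_alloc(). */
	struct page *page = c.page;

	printf("NUMA_NO_NODE: %d\n", node_match(page, NUMA_NO_NODE)); /* 1 */
	printf("node 0:       %d\n", node_match(page, 0));            /* 1 */
	printf("node 1:       %d\n", node_match(page, 1));            /* 0 */
	return 0;
}

Decoupling node_match() from kmem_cache_cpu means callers only need a page pointer, which also keeps the fast path working against a single snapshot of c->page rather than re-reading the per-cpu field.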