From 5d1f57e4d3d547b113ebd62f569be13bf485e53b Mon Sep 17 00:00:00 2001
From: Namhyung Kim
Date: Wed, 29 Sep 2010 21:02:15 +0900
Subject: [PATCH] slub: Move NUMA-related functions under CONFIG_NUMA

Make kmem_cache_alloc_node_notrace(), kmalloc_large_node() and
__kmalloc_node_track_caller() compile only when CONFIG_NUMA is
selected.

Acked-by: David Rientjes
Signed-off-by: Namhyung Kim
Signed-off-by: Pekka Enberg
---
 mm/slub.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 118422e2..9f121c1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1792,7 +1792,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
-#endif
 
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
@@ -1803,6 +1802,7 @@ void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
 #endif
+#endif
 
 /*
  * Slow patch handling. This may still be called frequently since objects
@@ -2673,6 +2673,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
+#ifdef CONFIG_NUMA
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
@@ -2687,7 +2688,6 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-#ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
@@ -3342,6 +3342,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	return ret;
 }
 
+#ifdef CONFIG_NUMA
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, unsigned long caller)
 {
@@ -3370,6 +3371,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
 	return ret;
 }
+#endif
 
 #ifdef CONFIG_SLUB_DEBUG
 static int count_inuse(struct page *page)
-- 
2.7.4
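
Note (not part of the patch): the change above is a pure preprocessor move; it
extends the existing #ifdef CONFIG_NUMA region so that the tracing variant of
kmem_cache_alloc_node() is covered, and wraps kmalloc_large_node() and
__kmalloc_node_track_caller() in their own CONFIG_NUMA guards. The sketch
below shows the same Kconfig-dependent compilation pattern in a minimal
standalone program. It is only an illustration under assumed names (demo.c,
alloc_obj, alloc_obj_node); it is not kernel code and not part of this patch.

/* demo.c - minimal sketch of guarding a node-aware helper with CONFIG_NUMA.
 * Build without NUMA support:  gcc -o demo demo.c
 * Build with NUMA support:     gcc -DCONFIG_NUMA -o demo demo.c
 */
#include <stdio.h>
#include <stdlib.h>

/* Always-available allocation path, analogous to __kmalloc(). */
static void *alloc_obj(size_t size)
{
	return malloc(size);
}

#ifdef CONFIG_NUMA
/* Node-aware helper: compiled only when CONFIG_NUMA is defined, just as
 * kmalloc_large_node() and __kmalloc_node_track_caller() are after the
 * patch above. */
static void *alloc_obj_node(size_t size, int node)
{
	printf("allocating %zu bytes for node %d\n", size, node);
	return alloc_obj(size);
}
#endif

int main(void)
{
#ifdef CONFIG_NUMA
	void *p = alloc_obj_node(128, 0);
#else
	void *p = alloc_obj(128);
#endif

	free(p);
	return 0;
}

When built without -DCONFIG_NUMA the node-aware helper is simply absent from
the object file, which is the effect the patch achieves for the SLUB helpers
on !CONFIG_NUMA kernels.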