mm/slub.c: switch to bitmap_zalloc()
author Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Fri, 26 Oct 2018 22:03:06 +0000 (15:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 26 Oct 2018 23:25:18 +0000 (16:25 -0700)
Switch to bitmap_zalloc() to show clearly what we are allocating.  Besides
that, it returns a pointer of bitmap type instead of an opaque void *.
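
For reference, a minimal sketch of the resulting pattern; example_bitmap_use()
and 'nbits' are illustrative names, not identifiers from this patch:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>
	#include <linux/gfp.h>

	/* Allocate, use and free a zero-initialized bitmap of 'nbits' bits. */
	static int example_bitmap_use(unsigned int nbits)
	{
		/* Same storage as kcalloc(BITS_TO_LONGS(nbits), sizeof(long), ...). */
		unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

		if (!map)
			return -ENOMEM;

		set_bit(0, map);	/* mark the first object */
		/* ... test_bit()/for_each_set_bit() over the objects ... */

		bitmap_free(map);	/* pairs with bitmap_zalloc()/bitmap_alloc() */
		return 0;
	}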

Link: http://lkml.kernel.org/r/20180830104301.61649-1-andriy.shevchenko@linux.intel.com
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slub.c

index 8da34a8..37e82a0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3621,9 +3621,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
        void *p;
-       unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects),
-                                    sizeof(long),
-                                    GFP_ATOMIC);
+       unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
        if (!map)
                return;
        slab_err(s, page, text, s->name);
@@ -3638,7 +3636,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                }
        }
        slab_unlock(page);
-       kfree(map);
+       bitmap_free(map);
 #endif
 }
 
@@ -4411,10 +4409,8 @@ static long validate_slab_cache(struct kmem_cache *s)
 {
        int node;
        unsigned long count = 0;
-       unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
-                                          sizeof(unsigned long),
-                                          GFP_KERNEL);
        struct kmem_cache_node *n;
+       unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
 
        if (!map)
                return -ENOMEM;
@@ -4422,7 +4418,7 @@ static long validate_slab_cache(struct kmem_cache *s)
        flush_all(s);
        for_each_kmem_cache_node(s, node, n)
                count += validate_slab_node(s, n, map);
-       kfree(map);
+       bitmap_free(map);
        return count;
 }
 /*
@@ -4573,14 +4569,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
        unsigned long i;
        struct loc_track t = { 0, 0, NULL };
        int node;
-       unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
-                                          sizeof(unsigned long),
-                                          GFP_KERNEL);
        struct kmem_cache_node *n;
+       unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
 
        if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
                                     GFP_KERNEL)) {
-               kfree(map);
+               bitmap_free(map);
                return sprintf(buf, "Out of memory\n");
        }
        /* Push back cpu slabs */
@@ -4646,7 +4640,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
        }
 
        free_loc_track(&t);
-       kfree(map);
+       bitmap_free(map);
        if (!t.count)
                len += sprintf(buf, "No data\n");
        return len;