mm/slub.c: clean up validate_slab()
author    Yu Zhao <yuzhao@google.com>
          Sun, 1 Dec 2019 01:49:37 +0000 (17:49 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 1 Dec 2019 14:29:18 +0000 (06:29 -0800)
The function doesn't need to return any value, and the check can be done
in one pass.

There is a behavior change: before the patch, validation stopped at the
first invalid free object; after the patch, it stops at the first invalid
object, whether free or in use.  This shouldn't matter, because the
original behavior wasn't intentional anyway.
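
For illustration only, here is a minimal user-space sketch of the same
single-pass pattern; it is not the kernel code, and the names free_map,
nr_objects and check_object_state() are hypothetical stand-ins for SLUB's
map, page->objects and check_object().

	#include <stdbool.h>
	#include <stddef.h>

	enum obj_state { OBJ_IN_USE, OBJ_FREE };

	/* stand-in for check_object(); a real checker would inspect
	 * redzones, poison bytes, etc. */
	static bool check_object_state(size_t idx, enum obj_state expected)
	{
		(void)idx;
		(void)expected;
		return true;
	}

	static void validate_objects(const bool *free_map, size_t nr_objects)
	{
		for (size_t i = 0; i < nr_objects; i++) {
			/* one pass: the expected state of each object
			 * comes straight from the free-object map */
			enum obj_state expected =
				free_map[i] ? OBJ_FREE : OBJ_IN_USE;

			if (!check_object_state(i, expected))
				break;	/* first invalid object, free or in use */
		}
	}

The actual kernel change in the diff below does the same thing with
get_map()/check_object(), selecting SLUB_RED_INACTIVE or SLUB_RED_ACTIVE
per object.
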

Link: http://lkml.kernel.org/r/20191108193958.205102-1-yuzhao@google.com
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/slub.c b/mm/slub.c
index 656f947..d113897 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4384,31 +4384,26 @@ static int count_total(struct page *page)
 #endif
 
 #ifdef CONFIG_SLUB_DEBUG
-static int validate_slab(struct kmem_cache *s, struct page *page,
+static void validate_slab(struct kmem_cache *s, struct page *page,
                                                unsigned long *map)
 {
        void *p;
        void *addr = page_address(page);
 
-       if (!check_slab(s, page) ||
-                       !on_freelist(s, page, NULL))
-               return 0;
+       if (!check_slab(s, page) || !on_freelist(s, page, NULL))
+               return;
 
        /* Now we know that a valid freelist exists */
        bitmap_zero(map, page->objects);
 
        get_map(s, page, map);
        for_each_object(p, s, addr, page->objects) {
-               if (test_bit(slab_index(p, s, addr), map))
-                       if (!check_object(s, page, p, SLUB_RED_INACTIVE))
-                               return 0;
-       }
+               u8 val = test_bit(slab_index(p, s, addr), map) ?
+                        SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
 
-       for_each_object(p, s, addr, page->objects)
-               if (!test_bit(slab_index(p, s, addr), map))
-                       if (!check_object(s, page, p, SLUB_RED_ACTIVE))
-                               return 0;
-       return 1;
+               if (!check_object(s, page, p, val))
+                       break;
+       }
 }
 
 static void validate_slab_slab(struct kmem_cache *s, struct page *page,