mm/slab.c: replace open-coded round-up code with ALIGN
author Canjiang Lu <canjiang.lu@samsung.com>
Thu, 6 Jul 2017 22:36:37 +0000 (15:36 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Jul 2017 23:24:30 +0000 (16:24 -0700)
Link: http://lkml.kernel.org/r/20170616072918epcms5p4ff16c24ef8472b4c3b4371823cd87856@epcms5p4
Signed-off-by: Canjiang Lu <canjiang.lu@samsung.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slab.c

index 2a31ee3..5033171 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2040,17 +2040,13 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
         * unaligned accesses for some archs when redzoning is used, and makes
         * sure any on-slab bufctl's are also correctly aligned.
         */
-       if (size & (BYTES_PER_WORD - 1)) {
-               size += (BYTES_PER_WORD - 1);
-               size &= ~(BYTES_PER_WORD - 1);
-       }
+       size = ALIGN(size, BYTES_PER_WORD);
 
        if (flags & SLAB_RED_ZONE) {
                ralign = REDZONE_ALIGN;
                /* If redzoning, ensure that the second redzone is suitably
                 * aligned, by adjusting the object size accordingly. */
-               size += REDZONE_ALIGN - 1;
-               size &= ~(REDZONE_ALIGN - 1);
+               size = ALIGN(size, REDZONE_ALIGN);
        }
 
        /* 3) caller mandated alignment */
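
A minimal userspace sketch of why this is a safe cleanup: for power-of-two alignments (which BYTES_PER_WORD and REDZONE_ALIGN are), the kernel's ALIGN() macro expands to exactly the same add-then-mask arithmetic the patch removes. The macro definitions below are reproduced from the kernel's definition chain for illustration; the test harness around them is hypothetical, not part of the patch.

	/*
	 * Standalone comparison of ALIGN() against the open-coded
	 * round-up deleted above. Assumes a power-of-two alignment.
	 */
	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Kernel-style definition chain for power-of-two alignment. */
	#define __ALIGN_KERNEL_MASK(x, mask)  (((x) + (mask)) & ~(mask))
	#define __ALIGN_KERNEL(x, a)          __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
	#define ALIGN(x, a)                   __ALIGN_KERNEL((x), (a))

	/* The pattern the patch deletes, kept here only for comparison. */
	static size_t open_coded_roundup(size_t size, size_t align)
	{
		if (size & (align - 1)) {
			size += (align - 1);
			size &= ~(align - 1);
		}
		return size;
	}

	int main(void)
	{
		const size_t word = sizeof(void *);	/* stand-in for BYTES_PER_WORD */
		size_t size;

		for (size = 0; size < 4096; size++)
			assert(ALIGN(size, word) == open_coded_roundup(size, word));

		printf("ALIGN() matches the open-coded round-up for all tested sizes\n");
		return 0;
	}

Note the one behavioral nuance: the open-coded version only adjusts size when it is misaligned, while ALIGN() applies the add-and-mask unconditionally; the two produce identical results either way, since masking an already-aligned value is a no-op.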