mm/mm_init: use helper macro BITS_PER_LONG and BITS_PER_BYTE
author Miaohe Lin <linmiaohe@huawei.com>
Mon, 7 Aug 2023 02:35:28 +0000 (10:35 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 21 Aug 2023 20:37:47 +0000 (13:37 -0700)
It's more readable to use the helper macros BITS_PER_LONG and BITS_PER_BYTE.
No functional change intended.

Link: https://lkml.kernel.org/r/20230807023528.325191-1-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
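
For context (not part of the patch): BITS_PER_BYTE is 8 and BITS_PER_LONG is the
number of bits in an unsigned long, so the substitutions below are purely cosmetic.
A minimal userspace sketch of the equivalence, with the macro definitions written
out here for illustration (the kernel provides them via its own headers):

#include <stdio.h>

/* Illustrative definitions; in the kernel these come from headers such as
 * <linux/bits.h> and <asm/bitsperlong.h>. */
#define BITS_PER_BYTE	8
#define BITS_PER_LONG	(BITS_PER_BYTE * (int)sizeof(long))

int main(void)
{
	/* The open-coded form the patch replaces ... */
	printf("8 * sizeof(unsigned long) = %zu\n", 8 * sizeof(unsigned long));
	/* ... and the macro it is replaced with. */
	printf("BITS_PER_LONG             = %d\n", BITS_PER_LONG);
	return 0;
}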
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 2a19f31..50f2f34 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -79,7 +79,7 @@ void __init mminit_verify_pageflags_layout(void)
        int shift, width;
        unsigned long or_mask, add_mask;
 
-       shift = 8 * sizeof(unsigned long);
+       shift = BITS_PER_LONG;
        width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
                - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
        mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
@@ -1426,9 +1426,9 @@ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned l
        usemapsize = roundup(zonesize, pageblock_nr_pages);
        usemapsize = usemapsize >> pageblock_order;
        usemapsize *= NR_PAGEBLOCK_BITS;
-       usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
+       usemapsize = roundup(usemapsize, BITS_PER_LONG);
 
-       return usemapsize / 8;
+       return usemapsize / BITS_PER_BYTE;
 }
 
 static void __ref setup_usemap(struct zone *zone)
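
To see what usemap_size() computes after the change, here is a simplified
userspace sketch of its arithmetic. The zone-start alignment step of the real
function is omitted, and the pageblock parameters are assumed example values
(pageblock_order taken as 9, as with 4 KiB pages and hugepage-sized pageblocks;
the real values are configuration dependent):

#include <stdio.h>

/* Assumed example values, not taken from any particular kernel config. */
#define BITS_PER_BYTE		8UL
#define BITS_PER_LONG		(BITS_PER_BYTE * sizeof(long))
#define NR_PAGEBLOCK_BITS	4UL
#define PAGEBLOCK_ORDER		9UL
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

#define roundup(x, y)		((((x) + (y) - 1) / (y)) * (y))

/* Mirrors the tail of usemap_size(): flag bits per pageblock, padded to a
 * whole unsigned long, converted to bytes. */
static unsigned long usemap_size(unsigned long zonesize)
{
	unsigned long usemapsize;

	usemapsize = roundup(zonesize, PAGEBLOCK_NR_PAGES);
	usemapsize >>= PAGEBLOCK_ORDER;			 /* number of pageblocks */
	usemapsize *= NR_PAGEBLOCK_BITS;		 /* flag bits needed     */
	usemapsize = roundup(usemapsize, BITS_PER_LONG); /* pad to a full word   */

	return usemapsize / BITS_PER_BYTE;		 /* size in bytes        */
}

int main(void)
{
	/* A 1 GiB zone of 4 KiB pages is 262144 pages -> 256 bytes of usemap. */
	printf("usemap bytes: %lu\n", usemap_size(262144));
	return 0;
}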