mm: add pageblock_align() macro
author: Kefeng Wang <wangkefeng.wang@huawei.com>
Wed, 7 Sep 2022 06:08:43 +0000 (14:08 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 3 Oct 2022 21:03:04 +0000 (14:03 -0700)
Add pageblock_align() macro and use it to simplify code.

Link: https://lkml.kernel.org/r/20220907060844.126891-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/pageblock-flags.h
mm/memblock.c
mm/page_isolation.c

index a09b7fe..293c766 100644 (file)
@@ -53,6 +53,7 @@ extern unsigned int pageblock_order;
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #define pageblock_nr_pages     (1UL << pageblock_order)
+#define pageblock_align(pfn)   ALIGN((pfn), pageblock_nr_pages)
 #define pageblock_start_pfn(pfn)       ALIGN_DOWN((pfn), pageblock_nr_pages)
 #define pageblock_end_pfn(pfn)         ALIGN((pfn) + 1, pageblock_nr_pages)
 
index 46fe757..511d478 100644 (file)
@@ -2014,12 +2014,12 @@ static void __init free_unused_memmap(void)
                 * presume that there are no holes in the memory map inside
                 * a pageblock
                 */
-               prev_end = ALIGN(end, pageblock_nr_pages);
+               prev_end = pageblock_align(end);
        }
 
 #ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
-               prev_end = ALIGN(end, pageblock_nr_pages);
+               prev_end = pageblock_align(end);
                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
        }
 #endif
index 5819cb9..fa82faa 100644 (file)
@@ -533,7 +533,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        struct page *page;
        /* isolation is done at page block granularity */
        unsigned long isolate_start = pageblock_start_pfn(start_pfn);
-       unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
+       unsigned long isolate_end = pageblock_align(end_pfn);
        int ret;
        bool skip_isolation = false;
 
@@ -580,7 +580,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        unsigned long pfn;
        struct page *page;
        unsigned long isolate_start = pageblock_start_pfn(start_pfn);
-       unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
+       unsigned long isolate_end = pageblock_align(end_pfn);
 
        for (pfn = isolate_start;
             pfn < isolate_end;