mm: reuse pageblock_start/end_pfn() macro
author	Kefeng Wang <wangkefeng.wang@huawei.com>
Wed, 7 Sep 2022 06:08:42 +0000 (14:08 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
Mon, 3 Oct 2022 21:03:03 +0000 (14:03 -0700)
Move pageblock_start_pfn()/pageblock_end_pfn() from mm/compaction.c into
pageblock-flags.h so they can be used elsewhere, not only in compaction.
Also use ALIGN_DOWN() instead of round_down() to pair with the ALIGN() in
pageblock_end_pfn(); the two are equivalent here because
pageblock_nr_pages is always a power of two.

Link: https://lkml.kernel.org/r/20220907060844.126891-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/pageblock-flags.h
mm/compaction.c
mm/memblock.c
mm/page_alloc.c
mm/page_isolation.c
mm/page_owner.c

index 83c7248053a1eb745e0bff410be66d0f04a5e8bb..a09b7fe6bbf8ec5115728ad374033821e50d2860 100644
@@ -53,6 +53,8 @@ extern unsigned int pageblock_order;
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #define pageblock_nr_pages     (1UL << pageblock_order)
+#define pageblock_start_pfn(pfn)       ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn)         ALIGN((pfn) + 1, pageblock_nr_pages)
 
 /* Forward declaration */
 struct page;
index 262c4676b32c1f44b00c2702a821414c4830732a..9cbe8562b63ac734f50a8d9be924662811d1a53d 100644
@@ -52,8 +52,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 
 #define block_start_pfn(pfn, order)    round_down(pfn, 1UL << (order))
 #define block_end_pfn(pfn, order)      ALIGN((pfn) + 1, 1UL << (order))
-#define pageblock_start_pfn(pfn)       block_start_pfn(pfn, pageblock_order)
-#define pageblock_end_pfn(pfn)         block_end_pfn(pfn, pageblock_order)
 
 /*
  * Page order with-respect-to which proactive compaction
index b5d3026979fccbb767458c2844ba1043ca47dd06..46fe7575f03c6959cb6cfb0b58b2f373587972be 100644
@@ -2000,7 +2000,7 @@ static void __init free_unused_memmap(void)
                 * presume that there are no holes in the memory map inside
                 * a pageblock
                 */
-               start = round_down(start, pageblock_nr_pages);
+               start = pageblock_start_pfn(start);
 
                /*
                 * If we had a previous bank, and there is a space
index 44f3c93643161f911cc40edd4b84bffdb4680b3a..1637db90472ed6e39a46e509d7dbceef8ff6da79 100644
@@ -544,7 +544,7 @@ static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
 #ifdef CONFIG_SPARSEMEM
        pfn &= (PAGES_PER_SECTION-1);
 #else
-       pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
+       pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
 #endif /* CONFIG_SPARSEMEM */
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 }
@@ -1857,7 +1857,7 @@ void set_zone_contiguous(struct zone *zone)
        unsigned long block_start_pfn = zone->zone_start_pfn;
        unsigned long block_end_pfn;
 
-       block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
+       block_end_pfn = pageblock_end_pfn(block_start_pfn);
        for (; block_start_pfn < zone_end_pfn(zone);
                        block_start_pfn = block_end_pfn,
                         block_end_pfn += pageblock_nr_pages) {
@@ -2653,8 +2653,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
                *num_movable = 0;
 
        pfn = page_to_pfn(page);
-       start_pfn = pfn & ~(pageblock_nr_pages - 1);
-       end_pfn = start_pfn + pageblock_nr_pages - 1;
+       start_pfn = pageblock_start_pfn(pfn);
+       end_pfn = pageblock_end_pfn(pfn) - 1;
 
        /* Do not cross zone boundaries */
        if (!zone_spans_pfn(zone, start_pfn))
@@ -6934,9 +6934,8 @@ static void __init init_unavailable_range(unsigned long spfn,
        u64 pgcnt = 0;
 
        for (pfn = spfn; pfn < epfn; pfn++) {
-               if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
-                       pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
-                               + pageblock_nr_pages - 1;
+               if (!pfn_valid(pageblock_start_pfn(pfn))) {
+                       pfn = pageblock_end_pfn(pfn) - 1;
                        continue;
                }
                __init_single_page(pfn_to_page(pfn), pfn, zone, node);
index eb3a68ca92ad9ad60532f9d97b55dc42a89eaa78..5819cb9c62f37890ccd027da8cc07106b78f334c 100644
@@ -37,8 +37,8 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
        struct zone *zone = page_zone(page);
        unsigned long pfn;
 
-       VM_BUG_ON(ALIGN_DOWN(start_pfn, pageblock_nr_pages) !=
-                 ALIGN_DOWN(end_pfn - 1, pageblock_nr_pages));
+       VM_BUG_ON(pageblock_start_pfn(start_pfn) !=
+                 pageblock_start_pfn(end_pfn - 1));
 
        if (is_migrate_cma_page(page)) {
                /*
@@ -172,7 +172,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
         * to avoid redundant checks.
         */
        check_unmovable_start = max(page_to_pfn(page), start_pfn);
-       check_unmovable_end = min(ALIGN(page_to_pfn(page) + 1, pageblock_nr_pages),
+       check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
                                  end_pfn);
 
        unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
@@ -532,7 +532,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        unsigned long pfn;
        struct page *page;
        /* isolation is done at page block granularity */
-       unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+       unsigned long isolate_start = pageblock_start_pfn(start_pfn);
        unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
        int ret;
        bool skip_isolation = false;
@@ -579,10 +579,9 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 {
        unsigned long pfn;
        struct page *page;
-       unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
+       unsigned long isolate_start = pageblock_start_pfn(start_pfn);
        unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
 
-
        for (pfn = isolate_start;
             pfn < isolate_end;
             pfn += pageblock_nr_pages) {
index 54f3e039fb483a45b361b50db9a62ba8f0455bb2..2d27f532df4c1064530b8178a03830998288bb45 100644
@@ -297,7 +297,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                        continue;
                }
 
-               block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+               block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);
 
                pageblock_mt = get_pageblock_migratetype(page);
@@ -635,7 +635,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                        continue;
                }
 
-               block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+               block_end_pfn = pageblock_end_pfn(pfn);
                block_end_pfn = min(block_end_pfn, end_pfn);
 
                for (; pfn < block_end_pfn; pfn++) {