From 3256ff83c566235e812498ee1dc806c45a5d5af7 Mon Sep 17 00:00:00 2001
From: Baoquan He
Date: Wed, 24 Feb 2021 12:06:17 -0800
Subject: [PATCH] mm: simplify parameter of function memmap_init_zone()

As David suggested, simply passing 'struct zone *zone' is enough. We can
get all needed information from 'struct zone*' easily.

Link: https://lkml.kernel.org/r/20210122135956.5946-4-bhe@redhat.com
Signed-off-by: Baoquan He
Suggested-by: David Hildenbrand
Reviewed-by: Mike Rapoport
Reviewed-by: David Hildenbrand
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/ia64/mm/init.c | 12 +++++++-----
 include/linux/mm.h  |  3 +--
 mm/page_alloc.c     | 24 +++++++++++-------------
 3 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 39b782f..16d0d7d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -542,12 +542,14 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
         return 0;
 }
 
-void __meminit
-memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-             unsigned long start_pfn)
+void __meminit memmap_init_zone(struct zone *zone)
 {
+        int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
+        unsigned long start_pfn = zone->zone_start_pfn;
+        unsigned long size = zone->spanned_pages;
+
         if (!vmem_map) {
-                memmap_init_range(size, nid, zone, start_pfn, start_pfn + size,
+                memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
                          MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
         } else {
                 struct page *start;
@@ -557,7 +559,7 @@ memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 args.start = start;
                 args.end = start + size;
                 args.nid = nid;
-                args.zone = zone;
+                args.zone = zone_id;
 
                 efi_memmap_walk(virtual_memmap_init, &args);
         }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 56d0eea..2601a5c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2408,8 +2408,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_range(unsigned long, int, unsigned long,
                 unsigned long, unsigned long, enum meminit_context,
                 struct vmem_altmap *, int migratetype);
-extern void memmap_init_zone(unsigned long size, int nid,
-                unsigned long zone, unsigned long range_start_pfn);
+extern void memmap_init_zone(struct zone *zone);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 519cf52..aa04c54 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6258,23 +6258,21 @@ static void __meminit zone_init_free_lists(struct zone *zone)
         }
 }
 
-void __meminit __weak memmap_init_zone(unsigned long size, int nid,
-                                       unsigned long zone,
-                                       unsigned long range_start_pfn)
+void __meminit __weak memmap_init_zone(struct zone *zone)
 {
+        unsigned long zone_start_pfn = zone->zone_start_pfn;
+        unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
+        int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone);
         unsigned long start_pfn, end_pfn;
-        unsigned long range_end_pfn = range_start_pfn + size;
-        int i;
 
         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
-                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
-                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+                start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
+                end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
 
-                if (end_pfn > start_pfn) {
-                        size = end_pfn - start_pfn;
-                        memmap_init_range(size, nid, zone, start_pfn, range_end_pfn,
-                                          MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
-                }
+                if (end_pfn > start_pfn)
+                        memmap_init_range(end_pfn - start_pfn, nid,
+                                        zone_id, start_pfn, zone_end_pfn,
+                                        MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
         }
 }
 
@@ -6982,7 +6980,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat)
                 set_pageblock_order();
                 setup_usemap(pgdat, zone, zone_start_pfn, size);
                 init_currently_empty_zone(zone, zone_start_pfn, size);
-                memmap_init_zone(size, nid, j, zone_start_pfn);
+                memmap_init_zone(zone);
         }
 }
 
-- 
2.7.4
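
To illustrate the point made in the commit message, that the nid, zone index, start pfn, and size previously passed as separate arguments are all recoverable from 'struct zone *', here is a minimal, self-contained userspace C sketch. The mock_zone/mock_pglist_data types and mock_* helpers are simplified stand-ins invented for this illustration, not the kernel's real structures or API (in the kernel, zone_idx() is derived from the zone's position in its node's node_zones[] array and zone_to_nid() reads the owning node).

/*
 * Minimal userspace sketch, not kernel code: simplified stand-ins showing
 * that the values the old memmap_init_zone() signature took as parameters
 * (size, nid, zone index, start_pfn) can all be derived from the zone.
 */
#include <stdio.h>

struct mock_pglist_data {
        int node_id;                          /* NUMA node this pgdat describes */
};

struct mock_zone {
        struct mock_pglist_data *zone_pgdat;  /* owning node */
        unsigned long zone_start_pfn;         /* first pfn of the zone */
        unsigned long spanned_pages;          /* pfns spanned, holes included */
        int idx;                              /* index within node_zones[] (simplified) */
};

/* Simplified equivalents of the kernel's zone_to_nid()/zone_idx() accessors. */
static int mock_zone_to_nid(const struct mock_zone *z)
{
        return z->zone_pgdat->node_id;
}

static int mock_zone_idx(const struct mock_zone *z)
{
        return z->idx;
}

/* Single-parameter form, mirroring the shape of the patched function. */
static void mock_memmap_init_zone(const struct mock_zone *zone)
{
        int nid = mock_zone_to_nid(zone), zone_id = mock_zone_idx(zone);
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long size = zone->spanned_pages;

        /* Before the patch, all four values below were caller-supplied. */
        printf("nid=%d zone_id=%d start_pfn=%#lx size=%#lx\n",
               nid, zone_id, start_pfn, size);
}

int main(void)
{
        struct mock_pglist_data node0 = { .node_id = 0 };
        struct mock_zone normal = {
                .zone_pgdat = &node0,
                .zone_start_pfn = 0x1000,
                .spanned_pages = 0x8000,
                .idx = 2,
        };

        mock_memmap_init_zone(&normal);  /* one argument instead of four */
        return 0;
}

Passing only the zone pointer keeps the call site in free_area_init_core() simpler and removes the possibility of a caller supplying mismatched size/nid/index values for the same zone.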