// SPDX-License-Identifier: GPL-2.0
/*
 * Bootmem core functions.
 *
 * Copyright (c) 2020, Bytedance.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 */
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>
#include <linux/kmemleak.h>
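/*
 * Mark a boot-time allocated page as holding bootmem metadata: record the
 * bootmem type in page->index, stash @info (a section or node number) in
 * page_private() and take an extra reference so the page is only released
 * once the last user calls put_page_bootmem().
 */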
void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
	page->index = type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}
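/*
 * Drop a reference taken by get_page_bootmem(). When the last reference is
 * gone, clear the bootmem metadata and hand the page back to the page
 * allocator via free_reserved_page().
 */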
void put_page_bootmem(struct page *page)
{
	unsigned long type = page->index;

	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->index = 0;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
		free_reserved_page(page);
	}
}
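/*
 * Register the pages backing a section's memmap and its mem_section_usage
 * with the bootmem info machinery. With SPARSEMEM_VMEMMAP the memmap pages
 * are handled by the architecture's register_page_bootmem_memmap(); the
 * classic SPARSEMEM variant walks the memmap pages directly.
 */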
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
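/*
 * Register bootmem info for a whole node: the pages holding its pglist_data
 * are tagged NODE_INFO, then every valid section belonging to this node is
 * registered via register_page_bootmem_info_section().
 */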
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes, we check that this pfn does not already
		 * reside on some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}