mm/bootmem_info.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Bootmem core functions.
 *
 * Copyright (c) 2020, Bytedance.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 */
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>

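/*
 * get_page_bootmem - mark a boot-time page as holding bootmem metadata.
 * The bootmem @type is recorded in page->freelist, @info (a section or
 * node number) is stashed in the page's private field, and a reference
 * is taken so the page stays pinned while the metadata is registered.
 */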
void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

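/*
 * put_page_bootmem - drop one registration reference. Once only the
 * initial boot-time reference remains, clear the bootmem state and hand
 * the page back to the page allocator via free_reserved_page().
 */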
void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	/* Catch pages that were never registered via get_page_bootmem() */
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

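/*
 * register_page_bootmem_info_section - pin the boot-time pages backing a
 * section's memmap and usage map so memory hot-remove can tell which
 * pages hold section metadata. Without SPARSEMEM_VMEMMAP the memmap
 * pages are walked and registered directly as SECTION_INFO; with it, the
 * memmap is registered through the architecture's
 * register_page_bootmem_memmap(). Both variants register the usage map
 * pages as MIX_SECTION_INFO.
 */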
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's pages */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	/* also pin the pages holding the section's usage map */
	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;
	struct mem_section_usage *usage;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/* the memmap lives in the vmemmap area; register its backing pages */
	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	page = virt_to_page(usage);

	mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

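/*
 * register_page_bootmem_info_node - register all boot-time metadata for
 * one node: the pages holding the node's pglist_data are pinned as
 * NODE_INFO, then every section spanned by the node is registered.
 * Callers (e.g. register_page_bootmem_info() on x86) are expected to
 * invoke this once per node with memory.
 */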
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	/* register the pages holding this node's pgdat */
	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN.  To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}