// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */

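/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * with a virtual memory map the pfn <-> struct page conversions collapse
 * to pointer arithmetic against the vmemmap base, roughly what the generic
 * __pfn_to_page()/__page_to_pfn() helpers do when CONFIG_SPARSEMEM_VMEMMAP
 * is enabled:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */
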
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

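/*
 * Bookkeeping sketch, added for clarity (not in the original file): a
 * struct vmem_altmap describes device pfns set aside to back the memmap
 * itself. Starting at @base_pfn, the first @reserve pfns are never handed
 * out; of the following @free pfns, @alloc + @align have been carved out
 * so far, so the next allocation starts at
 * base_pfn + reserve + alloc + align (vmem_altmap_next_pfn() above) and
 * at most free - (alloc + align) pfns remain (vmem_altmap_nr_free() above).
 * altmap_alloc_block_buf() below hands out the next naturally aligned
 * chunk of that range.
 */
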
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, PAGE_SIZE);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	/* Align the allocation to the lowest set bit of its size in pages. */
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			     start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE paths,
			 * and through vmemmap_populate_compound_pages() when
			 * slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (p)
		memset(p, 0, size);
	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}

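/*
 * A minimal sketch, for illustration only (not part of the original file),
 * of the vmemmap_populate() hook an architecture is expected to provide
 * (see the comment at the top of this file). Several architectures that
 * map the memmap with base pages implement it as little more than:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 *
 * Architectures that can back the memmap with huge mappings provide a more
 * elaborate implementation instead.
 */
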
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each successful range onlining. Thus the value of @nr_range
 * at section memmap populate time corresponds to the in-progress range
 * being onlined here.
 */

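/*
 * Worked example (added for clarity, not part of the original file):
 * with 4K base pages, a 1G compound page spans 262144 pfns while a 2M
 * subsection spans PAGES_PER_SUBSECTION = 512 pfns. A populate range
 * whose start_pfn is not aligned to the compound page size therefore
 * begins in the middle of a compound page and contains only tail
 * struct pages, which is what reuse_compound_section() detects with
 * the IS_ALIGNED() check below.
 */
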
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(*pte));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(*pte));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

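/*
 * Layout sketch (added for clarity, not part of the original file): for
 * each compound page only the first two vmemmap pages are actually
 * allocated; the first holds the head struct page (plus the first few
 * tails) and the second holds tail struct pages. Every remaining PTE of
 * that compound page's vmemmap range is pointed back at the second page,
 * e.g.:
 *
 *	vmemmap PTE 0    -> head page  (allocated)
 *	vmemmap PTE 1    -> tail page  (allocated)
 *	vmemmap PTE 2..N -> tail page  (reused, same physical page)
 *
 * See Documentation/mm/vmemmap_dedup.rst for the full diagram.
 */
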
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (is_power_of_2(sizeof(struct page)) &&
	    pgmap && pgmap_vmemmap_nr(pgmap) > 1 && !altmap)
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	return pfn_to_page(pfn);
}

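/*
 * Usage note (added for clarity, not part of the original file): this is
 * the CONFIG_SPARSEMEM_VMEMMAP backend used when a memory section's memmap
 * is created, e.g. during memory hotplug or when memremap_pages() maps
 * ZONE_DEVICE ranges; callers in mm/sparse.c reach it through
 * populate_section_memmap().
 */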