// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page(), page_to_pfn(),
 * virt_to_page() and page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
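/*
 * For example (with CONFIG_SPARSEMEM_VMEMMAP), the generic memory model in
 * include/asm-generic/memory_model.h reduces the pfn <-> page conversions
 * to pointer arithmetic against the vmemmap base:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */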
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of walked PTEs.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or is mapped from.
 */
struct vmemmap_remap_walk {
	void			(*remap_pte)(pte_t *pte, unsigned long addr,
					     struct vmemmap_remap_walk *walk);
	unsigned long		nr_walked;
	struct page		*reuse_page;
	unsigned long		reuse_addr;
	struct list_head	*vmemmap_pages;
};
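/*
 * Split a huge PMD that maps part of the vmemmap into a table of PTEs
 * mapping the same base pages, so that individual vmemmap pages can be
 * remapped and freed afterwards. The new PTE table is fully populated
 * before it is installed under init_mm.page_table_lock.
 */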
static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
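/*
 * Only take the expensive split path if the PMD is still a leaf. The check
 * and the split take init_mm.page_table_lock separately;
 * __split_vmemmap_huge_pmd() re-checks pmd_leaf() under the lock before
 * installing the PTE table.
 */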
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int leaf;

	spin_lock(&init_mm.page_table_lock);
	leaf = pmd_leaf(*pmd);
	spin_unlock(&init_mm.page_table_lock);

	if (!leaf)
		return 0;

	return __split_vmemmap_huge_pmd(pmd, start);
}
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}
static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}
static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}
static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}
static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to the range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}
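/*
 * A minimal usage sketch (mirroring vmemmap_remap_free() below): point
 * every PTE in [reuse + PAGE_SIZE, end) at the page mapped at @reuse.
 *
 *	LIST_HEAD(vmemmap_pages);
 *	struct vmemmap_remap_walk walk = {
 *		.remap_pte	= vmemmap_remap_pte,
 *		.reuse_addr	= reuse,
 *		.vmemmap_pages	= &vmemmap_pages,
 *	};
 *	ret = vmemmap_remap_range(reuse, end, &walk);
 */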
/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}
/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}
static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}
/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid value will
 * be checked in free_tail_pages_check(). To avoid the message of
 * "corrupted mapping in tail page", we need to reset at least 3 (one head
 * struct page struct and two tail struct page structs) struct page structs.
 */
#define NR_RESET_STRUCT_PAGE 3
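/*
 * Overwrite the first NR_RESET_STRUCT_PAGE struct pages with the contents
 * of a later tail struct page, whose state is valid for any tail page.
 */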
static inline void reset_struct_pages(struct page *start)
{
	int i;
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
		memcpy(start + i, from, sizeof(*from));
}
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}
/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_free(unsigned long start, unsigned long end,
		       unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};
	/*
	 * In order to make the remapping routine most efficient for huge
	 * pages, the routine of vmemmap page table walking has the following
	 * rules (see more details in vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   must be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
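	/*
	 * Concrete example (assuming x86-64, 4K pages, 64-byte struct page):
	 * a 2MB HugeTLB page has 512 struct pages occupying 8 vmemmap pages.
	 * HugeTLB passes @reuse = the address of vmemmap page 0 and @start =
	 * @reuse + PAGE_SIZE, so vmemmap pages 1..7 are remapped onto page 0
	 * and 7 pages can be freed.
	 */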
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed. These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}
/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range
 *			 [@start, @end) to pages which are taken from
 *			 @vmemmap_pages respectively.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
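/*
 * Carve the vmemmap backing for @size bytes out of the device-provided
 * altmap reservation (i.e. out of the device memory itself) rather than
 * out of regular system RAM, keeping the allocation naturally aligned.
 */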
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be a multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
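/*
 * Populate every page-table level (pgd through pte) needed to map one
 * vmemmap base page at @addr. If @reuse is non-NULL, the leaf level maps
 * that page instead of allocating a fresh one.
 */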
static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}
static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each successful range onlining. Thus the value of @nr_range
 * at section memmap populate corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}
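/*
 * In other words (a worked example, assuming x86-64): with 1G compound
 * pages, PAGES_PER_SUBSECTION covers only 2M, so every subsection that
 * does not start on a compound-page boundary consists purely of tail
 * struct pages and can reuse the tail vmemmap page populated for the
 * previous subsection.
 */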
static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}
static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(*pte));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next = addr, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/vm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(*pte));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}
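/*
 * Populate the memmap for a (sub)section worth of pfns. Compound
 * ZONE_DEVICE mappings without an altmap take the deduplicating path
 * above; everything else goes through the architecture's
 * vmemmap_populate().
 */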
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (is_power_of_2(sizeof(struct page)) &&
	    pgmap && pgmap_vmemmap_nr(pgmap) > 1 && !altmap)
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	return pfn_to_page(pfn);
}