// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);
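
/*
 * Memory is allocated from the buddy allocator once the slab allocator
 * is up, and from memblock during early boot. memblock_alloc() is called
 * with the allocation size as alignment, so tables stay naturally aligned.
 */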
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}
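
/*
 * Allocate a crst (region or segment) table and preset all of its
 * entries to @val, typically one of the *_ENTRY_EMPTY values.
 */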
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED),
 * ranges from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;
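
/*
 * Sub-PMD tracking in a nutshell: when the vmemmap is mapped by large
 * frames, a frame may be only partially backed by struct pages. Every
 * unused byte of such a frame is filled with the PAGE_UNUSED pattern,
 * so that on removal a single memchr_inv() over the frame tells whether
 * the whole large frame became unused and can be freed.
 *
 * Illustrative example (layout is hypothetical, not taken from this
 * file): if a section's memmap covers only the first half of a
 * PMD-mapped frame, the second half is memset to PAGE_UNUSED; once a
 * neighboring section populates that half, the pattern is overwritten
 * and the frame counts as used again.
 */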

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page
	 * from getting removed (just in case the memmap never gets
	 * initialized, e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
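
/*
 * The modify_*_table() helpers below all follow the same pattern:
 * "add" selects between populating and unmapping the range, while
 * "direct" distinguishes the 1:1 kernel mapping (identity mapped, no
 * backing pages to allocate or free) from the vmemmap (backed by
 * allocated pages).
 */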

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}
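
/*
 * Free the PTE table hanging off @pmd if none of its entries is in use
 * anymore.
 */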
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would have also
				 * page tables since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}
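
/*
 * Free the PMD table hanging off @pud if the whole PUD-sized range it
 * maps has become empty. Tables reaching beyond the 1:1 mapping and
 * vmemmap area are left alone.
 */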
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}
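
/*
 * Same walk one level up: with EDAT2 the 1:1 mapping may use 2GB
 * region-third entries, while the vmemmap path never installs PUD-sized
 * frames here and always descends to segment granularity.
 */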
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}
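
/*
 * Top-level walker: add or remove mappings for a page-aligned range,
 * allocating or freeing intermediate tables on the way. Removal
 * finishes with a TLB flush of the whole range.
 */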
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}
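
/*
 * The identity mapping can only reach addresses below max_mappable,
 * so memory hotplug is restricted to the range [0, max_mappable).
 */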
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along the way. Meeting a large segment- or region-table
 * entry while traversing is an error, since the function is expected to
 * be called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}
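
/*
 * Map a single 4K page at @addr to @phys with @prot. Any previous
 * translation is flushed with IPTE before the new PTE is installed.
 */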
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}
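
/*
 * Set up the final protections for kernel text and read-only data:
 * text becomes read-only + executable, rodata read-only, and, with
 * debug_pagealloc, the identity mapping is split into 4K pages.
 */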
void __init vmem_map_init(void)
{
	set_memory_rox((unsigned long)_stext,
		       (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
	set_memory_ro((unsigned long)_etext,
		      (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
	set_memory_rox((unsigned long)_sinittext,
		       (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
	set_memory_rox(__stext_amode31,
		       (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);

	/* lowcore must be executable for LPSWE */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled()) {
		set_memory_4k((unsigned long)__va(0),
			      ident_map_size >> PAGE_SHIFT);
	}

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}