1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Page table handling routines for radix page table.
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
8 #define pr_fmt(fmt) "radix-mmu: " fmt
11 #include <linux/kernel.h>
12 #include <linux/sched/mm.h>
13 #include <linux/memblock.h>
15 #include <linux/of_fdt.h>
17 #include <linux/hugetlb.h>
18 #include <linux/string_helpers.h>
19 #include <linux/memory.h>
21 #include <asm/pgalloc.h>
22 #include <asm/mmu_context.h>
24 #include <asm/machdep.h>
26 #include <asm/firmware.h>
27 #include <asm/powernv.h>
28 #include <asm/sections.h>
30 #include <asm/trace.h>
31 #include <asm/uaccess.h>
32 #include <asm/ultravisor.h>
33 #include <asm/set_memory.h>
35 #include <trace/events/thp.h>
37 #include <mm/mmu_decl.h>
39 unsigned int mmu_base_pid;
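/*
 * Memory block size for radix, set from the device tree (or OPAL) in
 * radix__early_init_devtree(). It also caps the largest page size used
 * by create_physical_mapping() for the linear mapping.
 */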
40 unsigned long radix_mem_block_size __ro_after_init;
42 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
43 unsigned long region_start, unsigned long region_end)
45 phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
46 phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
50 min_addr = region_start;
52 max_addr = region_end;
54 ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
57 panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
58 __func__, size, size, nid, &min_addr, &max_addr);
64 * When allocating pud or pmd pointers, we allocate a complete page
65 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
66 * is to ensure that the page obtained from the memblock allocator
67 * can be completely used as a page table page and can be freed
68 * correctly when the page table entries are removed.
70 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
72 unsigned int map_page_size,
74 unsigned long region_start, unsigned long region_end)
76 unsigned long pfn = pa >> PAGE_SHIFT;
83 pgdp = pgd_offset_k(ea);
84 p4dp = p4d_offset(pgdp, ea);
85 if (p4d_none(*p4dp)) {
86 pudp = early_alloc_pgtable(PAGE_SIZE, nid,
87 region_start, region_end);
88 p4d_populate(&init_mm, p4dp, pudp);
90 pudp = pud_offset(p4dp, ea);
91 if (map_page_size == PUD_SIZE) {
95 if (pud_none(*pudp)) {
96 pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
98 pud_populate(&init_mm, pudp, pmdp);
100 pmdp = pmd_offset(pudp, ea);
101 if (map_page_size == PMD_SIZE) {
102 ptep = pmdp_ptep(pmdp);
105 if (!pmd_present(*pmdp)) {
106 ptep = early_alloc_pgtable(PAGE_SIZE, nid,
107 region_start, region_end);
108 pmd_populate_kernel(&init_mm, pmdp, ptep);
110 ptep = pte_offset_kernel(pmdp, ea);
113 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
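/* ptesync orders the PTE store above before any access through the new mapping */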
114 asm volatile("ptesync": : :"memory");
119 * nid, region_start, and region_end are hints to try to place the page
120 * table memory in the same node or region.
122 static int __map_kernel_page(unsigned long ea, unsigned long pa,
124 unsigned int map_page_size,
126 unsigned long region_start, unsigned long region_end)
128 unsigned long pfn = pa >> PAGE_SHIFT;
135 * Make sure task size is correct as per the max addr
137 BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
139 #ifdef CONFIG_PPC_64K_PAGES
140 BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
143 if (unlikely(!slab_is_available()))
144 return early_map_kernel_page(ea, pa, flags, map_page_size,
145 nid, region_start, region_end);
148 * Should make page table allocation functions be able to take a
149 * node, so we can place kernel page tables on the right nodes after
152 pgdp = pgd_offset_k(ea);
153 p4dp = p4d_offset(pgdp, ea);
154 pudp = pud_alloc(&init_mm, p4dp, ea);
157 if (map_page_size == PUD_SIZE) {
158 ptep = (pte_t *)pudp;
161 pmdp = pmd_alloc(&init_mm, pudp, ea);
164 if (map_page_size == PMD_SIZE) {
165 ptep = pmdp_ptep(pmdp);
168 ptep = pte_alloc_kernel(pmdp, ea);
173 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
174 asm volatile("ptesync": : :"memory");
178 int radix__map_kernel_page(unsigned long ea, unsigned long pa,
180 unsigned int map_page_size)
182 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
185 #ifdef CONFIG_STRICT_KERNEL_RWX
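/*
 * Clear the given PTE flag(s), e.g. _PAGE_WRITE or _PAGE_EXEC, on every
 * page of a kernel virtual range, then flush the kernel TLB for the range.
 */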
186 static void radix__change_memory_range(unsigned long start, unsigned long end,
196 start = ALIGN_DOWN(start, PAGE_SIZE);
197 end = PAGE_ALIGN(end); // aligns up
199 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
202 for (idx = start; idx < end; idx += PAGE_SIZE) {
203 pgdp = pgd_offset_k(idx);
204 p4dp = p4d_offset(pgdp, idx);
205 pudp = pud_alloc(&init_mm, p4dp, idx);
208 if (pud_is_leaf(*pudp)) {
209 ptep = (pte_t *)pudp;
212 pmdp = pmd_alloc(&init_mm, pudp, idx);
215 if (pmd_is_leaf(*pmdp)) {
216 ptep = pmdp_ptep(pmdp);
219 ptep = pte_alloc_kernel(pmdp, idx);
223 radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
226 radix__flush_tlb_kernel_range(start, end);
229 void radix__mark_rodata_ro(void)
231 unsigned long start, end;
233 start = (unsigned long)_stext;
234 end = (unsigned long)__end_rodata;
236 radix__change_memory_range(start, end, _PAGE_WRITE);
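/*
 * A relocatable kernel running at a non-zero address still keeps its
 * interrupt vector text near PAGE_OFFSET, below _stext; mark those
 * pages read-only as well.
 */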
238 for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
239 end = start + PAGE_SIZE;
240 if (overlaps_interrupt_vector_text(start, end))
241 radix__change_memory_range(start, end, _PAGE_WRITE);
247 void radix__mark_initmem_nx(void)
249 unsigned long start = (unsigned long)__init_begin;
250 unsigned long end = (unsigned long)__init_end;
252 radix__change_memory_range(start, end, _PAGE_EXEC);
254 #endif /* CONFIG_STRICT_KERNEL_RWX */
256 static inline void __meminit
257 print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
264 string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
266 pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
267 exec ? " (exec)" : "");
270 static unsigned long next_boundary(unsigned long addr, unsigned long end)
272 #ifdef CONFIG_STRICT_KERNEL_RWX
273 unsigned long stext_phys;
275 stext_phys = __pa_symbol(_stext);
277 // Relocatable kernel running at non-zero real address
278 if (stext_phys != 0) {
279 // The end of interrupts code at zero is a rodata boundary
280 unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
284 // Start of relocated kernel text is a rodata boundary
285 if (addr < stext_phys)
289 if (addr < __pa_symbol(__srwx_boundary))
290 return __pa_symbol(__srwx_boundary);
295 static int __meminit create_physical_mapping(unsigned long start,
297 int nid, pgprot_t _prot)
299 unsigned long vaddr, addr, mapping_size = 0;
300 bool prev_exec, exec = false;
303 unsigned long max_mapping_size = radix_mem_block_size;
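/*
 * debug_pagealloc and KFENCE need to map and unmap individual pages, so
 * keep the linear mapping at base page size (see radix__kernel_map_pages()).
 */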
305 if (debug_pagealloc_enabled_or_kfence())
306 max_mapping_size = PAGE_SIZE;
308 start = ALIGN(start, PAGE_SIZE);
309 end = ALIGN_DOWN(end, PAGE_SIZE);
310 for (addr = start; addr < end; addr += mapping_size) {
311 unsigned long gap, previous_size;
314 gap = next_boundary(addr, end) - addr;
315 if (gap > max_mapping_size)
316 gap = max_mapping_size;
317 previous_size = mapping_size;
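/* Pick the largest page size the alignment and remaining gap allow: 1G, then 2M, then base pages. */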
320 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
321 mmu_psize_defs[MMU_PAGE_1G].shift) {
322 mapping_size = PUD_SIZE;
324 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
325 mmu_psize_defs[MMU_PAGE_2M].shift) {
326 mapping_size = PMD_SIZE;
329 mapping_size = PAGE_SIZE;
330 psize = mmu_virtual_psize;
333 vaddr = (unsigned long)__va(addr);
335 if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
336 overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
337 prot = PAGE_KERNEL_X;
344 if (mapping_size != previous_size || exec != prev_exec) {
345 print_mapping(start, addr, previous_size, prev_exec);
349 rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
353 update_page_count(psize, 1);
356 print_mapping(start, addr, mapping_size, exec);
360 static void __init radix_init_pgtable(void)
362 unsigned long rts_field;
363 phys_addr_t start, end;
366 /* We don't support slb for radix */
370 * Create the linear mapping
372 for_each_mem_range(i, &start, &end) {
374 * The memblock allocator is up at this point, so the
375 * page tables will be allocated within the range. No
376 * need for a node (which we don't have yet).
379 if (end >= RADIX_VMALLOC_START) {
380 pr_warn("Outside the supported range\n");
384 WARN_ON(create_physical_mapping(start, end,
388 if (!cpu_has_feature(CPU_FTR_HVMODE) &&
389 cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
391 * Older versions of KVM on these machines prefer if the
392 * guest only uses the low 19 PID bits.
399 * Allocate Partition table and process table for the
402 BUG_ON(PRTB_SIZE_SHIFT > 36);
403 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
405 * Fill in the process table.
407 rts_field = radix__get_tree_size();
408 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
411 * The init_mm context is given the first available (non-zero) PID,
412 * which is the "guard PID" and contains no page table. PIDR should
413 * never be set to zero because that duplicates the kernel address
414 * space at the 0x0... offset (quadrant 0)!
416 * An arbitrary PID that may later be allocated by the PID allocator
417 * for userspace processes must not be used either, because that
418 * would cause stale user mappings for that PID on CPUs outside of
419 * the TLB invalidation scheme (because it won't be in mm_cpumask).
421 * So permanently carve out one PID for the purpose of a guard PID.
423 init_mm.context.id = mmu_base_pid;
427 static void __init radix_init_partition_table(void)
429 unsigned long rts_field, dw0, dw1;
431 mmu_partition_table_init();
432 rts_field = radix__get_tree_size();
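/*
 * dw0: tree size, kernel radix root and root size, with PATB_HR (host radix);
 * dw1: process table base and size encoding, plus PATB_GR.
 */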
433 dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
434 dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
435 mmu_partition_table_set_entry(0, dw0, dw1, false);
437 pr_info("Initializing Radix MMU\n");
440 static int __init get_idx_from_shift(unsigned int shift)
461 static int __init radix_dt_scan_page_sizes(unsigned long node,
462 const char *uname, int depth,
469 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
471 /* We are scanning "cpu" nodes only */
472 if (type == NULL || strcmp(type, "cpu") != 0)
475 /* Grab page size encodings */
476 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
480 pr_info("Page sizes from device-tree:\n");
481 for (; size >= 4; size -= 4, ++prop) {
483 struct mmu_psize_def *def;
485 /* top 3 bits are the AP encoding */
486 shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
487 ap = be32_to_cpu(prop[0]) >> 29;
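/* e.g. an encoding cell of 0xa0000010 decodes to ap = 0x5, shift = 16, matching the 64K fallback values in radix__early_init_devtree() */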
488 pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
490 idx = get_idx_from_shift(shift);
494 def = &mmu_psize_defs[idx];
497 def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
501 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
505 #ifdef CONFIG_MEMORY_HOTPLUG
506 static int __init probe_memory_block_size(unsigned long node, const char *uname, int
509 unsigned long *mem_block_size = (unsigned long *)data;
516 if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
519 prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
521 if (!prop || len < dt_root_size_cells * sizeof(__be32))
523 * Nothing in the device tree
525 *mem_block_size = MIN_MEMORY_BLOCK_SIZE;
527 *mem_block_size = of_read_number(prop, dt_root_size_cells);
531 static unsigned long __init radix_memory_block_size(void)
533 unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
536 * OPAL firmware feature is set by now. Hence we are ok
537 * to test for the OPAL feature.
539 if (firmware_has_feature(FW_FEATURE_OPAL))
540 mem_block_size = 1UL * 1024 * 1024 * 1024;
542 of_scan_flat_dt(probe_memory_block_size, &mem_block_size);
544 return mem_block_size;
547 #else /* CONFIG_MEMORY_HOTPLUG */
549 static unsigned long __init radix_memory_block_size(void)
551 return 1UL * 1024 * 1024 * 1024;
554 #endif /* CONFIG_MEMORY_HOTPLUG */
557 void __init radix__early_init_devtree(void)
562 * Try to find the available page sizes in the device-tree
564 rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
567 * No page size details found in device tree.
568 * Let's assume we have 4K and 64K page support
570 mmu_psize_defs[MMU_PAGE_4K].shift = 12;
571 mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
572 mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
573 psize_to_rpti_pgsize(MMU_PAGE_4K);
575 mmu_psize_defs[MMU_PAGE_64K].shift = 16;
576 mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
577 mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
578 psize_to_rpti_pgsize(MMU_PAGE_64K);
582 * Max mapping size used when mapping pages. We don't use
583 * ppc_md.memory_block_size() here because this gets called
584 * early, before the machine probe has run. Also
585 * the pseries implementation only checks for ibm,lmb-size.
586 * All hypervisors supporting radix do expose that device
589 radix_mem_block_size = radix_memory_block_size();
593 void __init radix__early_init_mmu(void)
597 #ifdef CONFIG_PPC_64S_HASH_MMU
598 #ifdef CONFIG_PPC_64K_PAGES
599 /* PAGE_SIZE mappings */
600 mmu_virtual_psize = MMU_PAGE_64K;
602 mmu_virtual_psize = MMU_PAGE_4K;
605 #ifdef CONFIG_SPARSEMEM_VMEMMAP
606 /* vmemmap mapping */
607 if (mmu_psize_defs[MMU_PAGE_2M].shift) {
609 * map vmemmap using 2M if available
611 mmu_vmemmap_psize = MMU_PAGE_2M;
613 mmu_vmemmap_psize = mmu_virtual_psize;
617 * initialize page table size
619 __pte_index_size = RADIX_PTE_INDEX_SIZE;
620 __pmd_index_size = RADIX_PMD_INDEX_SIZE;
621 __pud_index_size = RADIX_PUD_INDEX_SIZE;
622 __pgd_index_size = RADIX_PGD_INDEX_SIZE;
623 __pud_cache_index = RADIX_PUD_INDEX_SIZE;
624 __pte_table_size = RADIX_PTE_TABLE_SIZE;
625 __pmd_table_size = RADIX_PMD_TABLE_SIZE;
626 __pud_table_size = RADIX_PUD_TABLE_SIZE;
627 __pgd_table_size = RADIX_PGD_TABLE_SIZE;
629 __pmd_val_bits = RADIX_PMD_VAL_BITS;
630 __pud_val_bits = RADIX_PUD_VAL_BITS;
631 __pgd_val_bits = RADIX_PGD_VAL_BITS;
633 __kernel_virt_start = RADIX_KERN_VIRT_START;
634 __vmalloc_start = RADIX_VMALLOC_START;
635 __vmalloc_end = RADIX_VMALLOC_END;
636 __kernel_io_start = RADIX_KERN_IO_START;
637 __kernel_io_end = RADIX_KERN_IO_END;
638 vmemmap = (struct page *)RADIX_VMEMMAP_START;
639 ioremap_bot = IOREMAP_BASE;
642 pci_io_base = ISA_IO_BASE;
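/* Page table fragment geometry: how many PTE/PMD fragments share a page and their size shift */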
644 __pte_frag_nr = RADIX_PTE_FRAG_NR;
645 __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
646 __pmd_frag_nr = RADIX_PMD_FRAG_NR;
647 __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
649 radix_init_pgtable();
651 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
652 lpcr = mfspr(SPRN_LPCR);
653 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
654 radix_init_partition_table();
656 radix_init_pseries();
659 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
661 /* Switch to the guard PID before turning on MMU */
662 radix__switch_mmu_context(NULL, &init_mm);
666 void radix__early_init_mmu_secondary(void)
670 * update partition table control register and UPRT
672 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
673 lpcr = mfspr(SPRN_LPCR);
674 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
676 set_ptcr_when_no_uv(__pa(partition_tb) |
677 (PATB_SIZE_SHIFT - 12));
680 radix__switch_mmu_context(NULL, &init_mm);
683 /* Make sure userspace can't change the AMR */
684 mtspr(SPRN_UAMOR, 0);
687 /* Called during kexec sequence with MMU off */
688 notrace void radix__mmu_cleanup_all(void)
692 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
693 lpcr = mfspr(SPRN_LPCR);
694 mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
695 set_ptcr_when_no_uv(0);
696 powernv_set_nmmu_ptcr(0);
697 radix__flush_tlb_all();
701 #ifdef CONFIG_MEMORY_HOTPLUG
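/* Free a PTE page back only once every entry in it has been cleared. */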
702 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
707 for (i = 0; i < PTRS_PER_PTE; i++) {
713 pte_free_kernel(&init_mm, pte_start);
717 static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
722 for (i = 0; i < PTRS_PER_PMD; i++) {
728 pmd_free(&init_mm, pmd_start);
732 static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
737 for (i = 0; i < PTRS_PER_PUD; i++) {
743 pud_free(&init_mm, pud_start);
747 #ifdef CONFIG_SPARSEMEM_VMEMMAP
748 static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
750 unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
752 return !vmemmap_populated(start, PMD_SIZE);
755 static bool __meminit vmemmap_page_is_unused(unsigned long addr, unsigned long end)
757 unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
759 return !vmemmap_populated(start, PAGE_SIZE);
764 static void __meminit free_vmemmap_pages(struct page *page,
765 struct vmem_altmap *altmap,
768 unsigned int nr_pages = 1 << order;
771 unsigned long alt_start, alt_end;
772 unsigned long base_pfn = page_to_pfn(page);
775 * with 2M vmemmap mapping we can have things set up
776 * such that even though altmap is specified we never
779 alt_start = altmap->base_pfn;
780 alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
782 if (base_pfn >= alt_start && base_pfn < alt_end) {
783 vmem_altmap_free(altmap, nr_pages);
788 if (PageReserved(page)) {
789 /* allocated from memblock */
791 free_reserved_page(page++);
793 free_pages((unsigned long)page_address(page), order);
796 static void __meminit remove_pte_table(pte_t *pte_start, unsigned long addr,
797 unsigned long end, bool direct,
798 struct vmem_altmap *altmap)
800 unsigned long next, pages = 0;
803 pte = pte_start + pte_index(addr);
804 for (; addr < end; addr = next, pte++) {
805 next = (addr + PAGE_SIZE) & PAGE_MASK;
809 if (!pte_present(*pte))
812 if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
814 free_vmemmap_pages(pte_page(*pte), altmap, 0);
815 pte_clear(&init_mm, addr, pte);
818 #ifdef CONFIG_SPARSEMEM_VMEMMAP
819 else if (!direct && vmemmap_page_is_unused(addr, next)) {
820 free_vmemmap_pages(pte_page(*pte), altmap, 0);
821 pte_clear(&init_mm, addr, pte);
826 update_page_count(mmu_virtual_psize, -pages);
829 static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
830 unsigned long end, bool direct,
831 struct vmem_altmap *altmap)
833 unsigned long next, pages = 0;
837 pmd = pmd_start + pmd_index(addr);
838 for (; addr < end; addr = next, pmd++) {
839 next = pmd_addr_end(addr, end);
841 if (!pmd_present(*pmd))
844 if (pmd_is_leaf(*pmd)) {
845 if (IS_ALIGNED(addr, PMD_SIZE) &&
846 IS_ALIGNED(next, PMD_SIZE)) {
848 free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
849 pte_clear(&init_mm, addr, (pte_t *)pmd);
852 #ifdef CONFIG_SPARSEMEM_VMEMMAP
853 else if (!direct && vmemmap_pmd_is_unused(addr, next)) {
854 free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
855 pte_clear(&init_mm, addr, (pte_t *)pmd);
861 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
862 remove_pte_table(pte_base, addr, next, direct, altmap);
863 free_pte_table(pte_base, pmd);
866 update_page_count(MMU_PAGE_2M, -pages);
869 static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
870 unsigned long end, bool direct,
871 struct vmem_altmap *altmap)
873 unsigned long next, pages = 0;
877 pud = pud_start + pud_index(addr);
878 for (; addr < end; addr = next, pud++) {
879 next = pud_addr_end(addr, end);
881 if (!pud_present(*pud))
884 if (pud_is_leaf(*pud)) {
885 if (!IS_ALIGNED(addr, PUD_SIZE) ||
886 !IS_ALIGNED(next, PUD_SIZE)) {
887 WARN_ONCE(1, "%s: unaligned range\n", __func__);
890 pte_clear(&init_mm, addr, (pte_t *)pud);
895 pmd_base = pud_pgtable(*pud);
896 remove_pmd_table(pmd_base, addr, next, direct, altmap);
897 free_pmd_table(pmd_base, pud);
900 update_page_count(MMU_PAGE_1G, -pages);
903 static void __meminit
904 remove_pagetable(unsigned long start, unsigned long end, bool direct,
905 struct vmem_altmap *altmap)
907 unsigned long addr, next;
912 spin_lock(&init_mm.page_table_lock);
914 for (addr = start; addr < end; addr = next) {
915 next = pgd_addr_end(addr, end);
917 pgd = pgd_offset_k(addr);
918 p4d = p4d_offset(pgd, addr);
919 if (!p4d_present(*p4d))
922 if (p4d_is_leaf(*p4d)) {
923 if (!IS_ALIGNED(addr, P4D_SIZE) ||
924 !IS_ALIGNED(next, P4D_SIZE)) {
925 WARN_ONCE(1, "%s: unaligned range\n", __func__);
929 pte_clear(&init_mm, addr, (pte_t *)pgd);
933 pud_base = p4d_pgtable(*p4d);
934 remove_pud_table(pud_base, addr, next, direct, altmap);
935 free_pud_table(pud_base, p4d);
938 spin_unlock(&init_mm.page_table_lock);
939 radix__flush_tlb_kernel_range(start, end);
942 int __meminit radix__create_section_mapping(unsigned long start,
943 unsigned long end, int nid,
946 if (end >= RADIX_VMALLOC_START) {
947 pr_warn("Outside the supported range\n");
951 return create_physical_mapping(__pa(start), __pa(end),
955 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
957 remove_pagetable(start, end, true, NULL);
960 #endif /* CONFIG_MEMORY_HOTPLUG */
962 #ifdef CONFIG_SPARSEMEM_VMEMMAP
963 static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
964 pgprot_t flags, unsigned int map_page_size,
967 return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
970 int __meminit radix__vmemmap_create_mapping(unsigned long start,
971 unsigned long page_size,
974 /* Create a PTE encoding */
975 int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
978 if ((start + page_size) >= RADIX_VMEMMAP_END) {
979 pr_warn("Outside the supported range\n");
983 ret = __map_kernel_page_nid(start, phys, PAGE_KERNEL, page_size, nid);
990 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
993 return __vmemmap_can_optimize(altmap, pgmap);
998 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
999 unsigned long addr, unsigned long next)
1001 int large = pmd_large(*pmdp);
1004 vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
1009 void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
1010 unsigned long addr, unsigned long next)
1013 pte_t *ptep = pmdp_ptep(pmdp);
1015 VM_BUG_ON(!IS_ALIGNED(addr, PMD_SIZE));
1016 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
1017 set_pte_at(&init_mm, addr, ptep, entry);
1018 asm volatile("ptesync": : :"memory");
1020 vmemmap_verify(ptep, node, addr, next);
1023 static pte_t * __meminit radix__vmemmap_pte_populate(pmd_t *pmdp, unsigned long addr,
1025 struct vmem_altmap *altmap,
1028 pte_t *pte = pte_offset_kernel(pmdp, addr);
1030 if (pte_none(*pte)) {
1036 * make sure we don't create altmap mappings
1037 * covering things outside the device.
1039 if (altmap && altmap_cross_boundary(altmap, addr, PAGE_SIZE))
1042 p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
1044 p = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL);
1049 * When a PTE/PMD entry is freed from the init_mm
1050 * there's a free_pages() call to this page allocated
1051 * above. Thus this get_page() is paired with the
1052 * put_page_testzero() on the freeing path.
1053 * This can only be called by certain ZONE_DEVICE paths,
1054 * and through vmemmap_populate_compound_pages() when
1055 * slab is available.
1058 p = page_to_virt(reuse);
1061 VM_BUG_ON(!PAGE_ALIGNED(addr));
1062 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
1063 set_pte_at(&init_mm, addr, pte, entry);
1064 asm volatile("ptesync": : :"memory");
1069 static inline pud_t *vmemmap_pud_alloc(p4d_t *p4dp, int node,
1070 unsigned long address)
1074 /* To keep it simple, all early vmemmap mappings are done at PAGE_SIZE */
1075 if (unlikely(p4d_none(*p4dp))) {
1076 if (unlikely(!slab_is_available())) {
1077 pud = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1078 p4d_populate(&init_mm, p4dp, pud);
1079 /* go to the pud_offset */
1081 return pud_alloc(&init_mm, p4dp, address);
1083 return pud_offset(p4dp, address);
1086 static inline pmd_t *vmemmap_pmd_alloc(pud_t *pudp, int node,
1087 unsigned long address)
1091 /* To keep it simple, all early vmemmap mappings are done at PAGE_SIZE */
1092 if (unlikely(pud_none(*pudp))) {
1093 if (unlikely(!slab_is_available())) {
1094 pmd = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1095 pud_populate(&init_mm, pudp, pmd);
1097 return pmd_alloc(&init_mm, pudp, address);
1099 return pmd_offset(pudp, address);
1102 static inline pte_t *vmemmap_pte_alloc(pmd_t *pmdp, int node,
1103 unsigned long address)
1107 /* To keep it simple, all early vmemmap mappings are done at PAGE_SIZE */
1108 if (unlikely(pmd_none(*pmdp))) {
1109 if (unlikely(!slab_is_available())) {
1110 pte = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1111 pmd_populate(&init_mm, pmdp, pte);
1113 return pte_alloc_kernel(pmdp, address);
1115 return pte_offset_kernel(pmdp, address);
1120 int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
1121 struct vmem_altmap *altmap)
1131 for (addr = start; addr < end; addr = next) {
1132 next = pmd_addr_end(addr, end);
1134 pgd = pgd_offset_k(addr);
1135 p4d = p4d_offset(pgd, addr);
1136 pud = vmemmap_pud_alloc(p4d, node, addr);
1139 pmd = vmemmap_pmd_alloc(pud, node, addr);
1143 if (pmd_none(READ_ONCE(*pmd))) {
1147 * keep it simple by checking addr PMD_SIZE alignment
1148 * and verifying the device boundary condition.
1149 * For us to use a pmd mapping, both addr and pfn should
1150 * be aligned. We skip if addr is not aligned and for
1151 * pfn we hope we have extra area in the altmap that
1152 * can help to find an aligned block. This can result
1153 * in altmap block allocation failures, in which case
1154 * we fall back to RAM for the vmemmap allocation.
1156 if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
1157 altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
1159 * make sure we don't create altmap mappings
1160 * covering things outside the device.
1165 p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
1167 vmemmap_set_pmd(pmd, p, node, addr, next);
1169 } else if (altmap) {
1171 * A vmemmap block allocation can fail due to
1172 * alignment requirements when we try to align
1173 * things aggressively, thereby running out of
1174 * space. Try base mapping on failure.
1178 } else if (vmemmap_check_pmd(pmd, node, addr, next)) {
1180 * If a huge mapping exists due to an early call to
1181 * vmemmap_populate, let's try to use that.
1187 * Not able to allocate higher-order memory to back the memmap,
1188 * or we found a pointer to a pte page. Allocate base page
1191 pte = vmemmap_pte_alloc(pmd, node, addr);
1195 pte = radix__vmemmap_pte_populate(pmd, addr, node, altmap, NULL);
1199 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1200 next = addr + PAGE_SIZE;
1205 static pte_t * __meminit radix__vmemmap_populate_address(unsigned long addr, int node,
1206 struct vmem_altmap *altmap,
1215 pgd = pgd_offset_k(addr);
1216 p4d = p4d_offset(pgd, addr);
1217 pud = vmemmap_pud_alloc(p4d, node, addr);
1220 pmd = vmemmap_pmd_alloc(pud, node, addr);
1225 * The second page is mapped as a hugepage due to a nearby request.
1226 * Force our mapping to page size without deduplication
1229 pte = vmemmap_pte_alloc(pmd, node, addr);
1232 radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1233 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1238 static pte_t * __meminit vmemmap_compound_tail_page(unsigned long addr,
1239 unsigned long pfn_offset, int node)
1246 unsigned long map_addr;
1248 /* the second vmemmap page which we use for duplication */
1249 map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE;
1250 pgd = pgd_offset_k(map_addr);
1251 p4d = p4d_offset(pgd, map_addr);
1252 pud = vmemmap_pud_alloc(p4d, node, map_addr);
1255 pmd = vmemmap_pmd_alloc(pud, node, map_addr);
1260 * The second page is mapped as a hugepage due to a nearby request.
1261 * Force our mapping to page size without deduplication
1264 pte = vmemmap_pte_alloc(pmd, node, map_addr);
1268 * Check if there exists a mapping to the left
1270 if (pte_none(*pte)) {
1272 * Populate the head page vmemmap page.
1273 * It can fall in a different pmd, hence
1274 * vmemmap_populate_address()
1276 pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL);
1280 * Populate the tail pages vmemmap page
1282 pte = radix__vmemmap_pte_populate(pmd, map_addr, node, NULL, NULL);
1285 vmemmap_verify(pte, node, map_addr, map_addr + PAGE_SIZE);
1291 int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
1292 unsigned long start,
1293 unsigned long end, int node,
1294 struct dev_pagemap *pgmap)
1297 * we want to map things with base page size mappings so that
1298 * we can save space in vmemmap. We could have a huge mapping
1299 * covering both edges.
1302 unsigned long addr_pfn = start_pfn;
1310 for (addr = start; addr < end; addr = next) {
1312 pgd = pgd_offset_k(addr);
1313 p4d = p4d_offset(pgd, addr);
1314 pud = vmemmap_pud_alloc(p4d, node, addr);
1317 pmd = vmemmap_pmd_alloc(pud, node, addr);
1321 if (pmd_leaf(READ_ONCE(*pmd))) {
1322 /* existing huge mapping. Skip the range */
1323 addr_pfn += (PMD_SIZE >> PAGE_SHIFT);
1324 next = pmd_addr_end(addr, end);
1327 pte = vmemmap_pte_alloc(pmd, node, addr);
1330 if (!pte_none(*pte)) {
1332 * This could be because we already have a compound
1333 * page whose VMEMMAP_RESERVE_NR pages were mapped and
1334 * this request falls within those pages.
1337 next = addr + PAGE_SIZE;
1340 unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
1341 unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages);
1342 pte_t *tail_page_pte;
1345 * if the address is aligned to huge page size it is the
1348 if (pfn_offset == 0) {
1349 /* Populate the head page vmemmap page */
1350 pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1353 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1356 * Populate the tail pages vmemmap page
1357 * It can fall in a different pmd, hence
1358 * vmemmap_populate_address()
1360 pte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL);
1365 next = addr + 2 * PAGE_SIZE;
1369 * Get the details of the 2nd mapping,
1370 * and create it if it doesn't exist
1372 tail_page_pte = vmemmap_compound_tail_page(addr, pfn_offset, node);
1373 if (!tail_page_pte) {
1375 pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1378 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1381 next = addr + PAGE_SIZE;
1385 pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, pte_page(*tail_page_pte));
1388 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1391 next = addr + PAGE_SIZE;
1399 #ifdef CONFIG_MEMORY_HOTPLUG
1400 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
1402 remove_pagetable(start, start + page_size, true, NULL);
1405 void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
1406 struct vmem_altmap *altmap)
1408 remove_pagetable(start, end, false, altmap);
1413 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
1414 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
1418 addr = (unsigned long)page_address(page);
1421 set_memory_p(addr, numpages);
1423 set_memory_np(addr, numpages);
1427 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1429 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
1430 pmd_t *pmdp, unsigned long clr,
1435 #ifdef CONFIG_DEBUG_VM
1436 WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
1437 assert_spin_locked(pmd_lockptr(mm, pmdp));
1440 old = radix__pte_update(mm, addr, pmdp_ptep(pmdp), clr, set, 1);
1441 trace_hugepage_update_pmd(addr, old, clr, set);
1446 unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long addr,
1447 pud_t *pudp, unsigned long clr,
1452 #ifdef CONFIG_DEBUG_VM
1453 WARN_ON(!pud_devmap(*pudp));
1454 assert_spin_locked(pud_lockptr(mm, pudp));
1457 old = radix__pte_update(mm, addr, pudp_ptep(pudp), clr, set, 1);
1458 trace_hugepage_update_pud(addr, old, clr, set);
1463 pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
1469 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1470 VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
1471 VM_BUG_ON(pmd_devmap(*pmdp));
1473 * khugepaged calls this for normal pmd
1478 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
1484 * For us pgtable_t is pte_t *. In order to save the deposited
1485 * page table, we consider the allocated page table as a list
1486 * head. On withdraw we need to make sure we zero out the used
1487 * list_head memory area.
1489 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1492 struct list_head *lh = (struct list_head *) pgtable;
1494 assert_spin_locked(pmd_lockptr(mm, pmdp));
1497 if (!pmd_huge_pte(mm, pmdp))
1500 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1501 pmd_huge_pte(mm, pmdp) = pgtable;
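/*
 * Withdraw pops the most recently deposited page table: the head of the
 * per-PMD list built up by the deposit helper above.
 */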
1504 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1508 struct list_head *lh;
1510 assert_spin_locked(pmd_lockptr(mm, pmdp));
1513 pgtable = pmd_huge_pte(mm, pmdp);
1514 lh = (struct list_head *) pgtable;
1516 pmd_huge_pte(mm, pmdp) = NULL;
1518 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1521 ptep = (pte_t *) pgtable;
1528 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1529 unsigned long addr, pmd_t *pmdp)
1534 old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1535 old_pmd = __pmd(old);
1539 pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm,
1540 unsigned long addr, pud_t *pudp)
1545 old = radix__pud_hugepage_update(mm, addr, pudp, ~0UL, 0);
1546 old_pud = __pud(old);
1550 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1552 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1553 pte_t entry, unsigned long address, int psize)
1555 struct mm_struct *mm = vma->vm_mm;
1556 unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
1557 _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
1559 unsigned long change = pte_val(entry) ^ pte_val(*ptep);
1561 * On POWER9, the NMMU is not able to relax PTE access permissions
1562 * for a translation with a TLB. The PTE must be invalidated, TLB
1563 * flushed before the new PTE is installed.
1565 * This only needs to be done for radix, because hash translation does
1566 * flush when updating the linux pte (and we don't support NMMU
1567 * accelerators on HPT on POWER9 anyway XXX: do we?).
1569 * POWER10 (and P9P) NMMU does behave as per ISA.
1571 if (!cpu_has_feature(CPU_FTR_ARCH_31) && (change & _PAGE_RW) &&
1572 atomic_read(&mm->context.copros) > 0) {
1573 unsigned long old_pte, new_pte;
1575 old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
1576 new_pte = old_pte | set;
1577 radix__flush_tlb_page_psize(mm, address, psize);
1578 __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
1580 __radix_pte_update(ptep, 0, set);
1582 * Book3S does not require a TLB flush when relaxing access
1583 * restrictions on an address space (modulo the POWER9 nest
1584 * MMU issue above), because the MMU will reload the PTE after
1585 * taking an access fault, as defined by the architecture. See
1586 * "Setting a Reference or Change Bit or Upgrading Access
1587 * Authority (PTE Subject to Atomic Hardware Updates)" in
1588 * Power ISA Version 3.1B.
1591 /* See ptesync comment in radix__set_pte_at */
1594 void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1595 unsigned long addr, pte_t *ptep,
1596 pte_t old_pte, pte_t pte)
1598 struct mm_struct *mm = vma->vm_mm;
1601 * POWER9 NMMU must flush the TLB after clearing the PTE before
1602 * installing a PTE with more relaxed access permissions, see
1603 * radix__ptep_set_access_flags.
1605 if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
1606 is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1607 (atomic_read(&mm->context.copros) > 0))
1608 radix__flush_tlb_page(vma, addr);
1610 set_pte_at(mm, addr, ptep, pte);
1613 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1615 pte_t *ptep = (pte_t *)pud;
1616 pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1618 if (!radix_enabled())
1621 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1626 int pud_clear_huge(pud_t *pud)
1628 if (pud_is_leaf(*pud)) {
1636 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1641 pmd = pud_pgtable(*pud);
1644 flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1646 for (i = 0; i < PTRS_PER_PMD; i++) {
1647 if (!pmd_none(pmd[i])) {
1649 pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1651 pte_free_kernel(&init_mm, pte);
1655 pmd_free(&init_mm, pmd);
1660 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1662 pte_t *ptep = (pte_t *)pmd;
1663 pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1665 if (!radix_enabled())
1668 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1673 int pmd_clear_huge(pmd_t *pmd)
1675 if (pmd_is_leaf(*pmd)) {
1683 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1687 pte = (pte_t *)pmd_page_vaddr(*pmd);
1690 flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1692 pte_free_kernel(&init_mm, pte);