powerpc/book3s64/radix: add support for vmemmap optimization for radix
arch/powerpc/mm/book3s64/radix_pgtable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>
#include <asm/set_memory.h>

#include <trace/events/thp.h>

#include <mm/mmu_decl.h>

unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;

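/*
 * Boot-time page table allocation: grab a naturally aligned block from
 * memblock, optionally constrained to [region_start, region_end) and to
 * the requested node. There is no way to recover from a failure this
 * early in boot, so panic rather than return NULL.
 */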
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
                        unsigned long region_start, unsigned long region_end)
{
        phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
        phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
        void *ptr;

        if (region_start)
                min_addr = region_start;
        if (region_end)
                max_addr = region_end;

        ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

        if (!ptr)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
                      __func__, size, size, nid, &min_addr, &max_addr);

        return ptr;
}

/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be used completely as a page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size,
                          int nid,
                          unsigned long region_start, unsigned long region_end)
{
        unsigned long pfn = pa >> PAGE_SHIFT;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        pgdp = pgd_offset_k(ea);
        p4dp = p4d_offset(pgdp, ea);
        if (p4d_none(*p4dp)) {
                pudp = early_alloc_pgtable(PAGE_SIZE, nid,
                                           region_start, region_end);
                p4d_populate(&init_mm, p4dp, pudp);
        }
        pudp = pud_offset(p4dp, ea);
        if (map_page_size == PUD_SIZE) {
                ptep = (pte_t *)pudp;
                goto set_the_pte;
        }
        if (pud_none(*pudp)) {
                pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
                                           region_end);
                pud_populate(&init_mm, pudp, pmdp);
        }
        pmdp = pmd_offset(pudp, ea);
        if (map_page_size == PMD_SIZE) {
                ptep = pmdp_ptep(pmdp);
                goto set_the_pte;
        }
        if (!pmd_present(*pmdp)) {
                ptep = early_alloc_pgtable(PAGE_SIZE, nid,
                                                region_start, region_end);
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
        ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
        asm volatile("ptesync": : :"memory");
        return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size,
                          int nid,
                          unsigned long region_start, unsigned long region_end)
{
        unsigned long pfn = pa >> PAGE_SHIFT;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        /*
         * Make sure task size is correct as per the max address
         */
        BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
        BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

        if (unlikely(!slab_is_available()))
                return early_map_kernel_page(ea, pa, flags, map_page_size,
                                                nid, region_start, region_end);

        /*
         * Should make page table allocation functions be able to take a
         * node, so we can place kernel page tables on the right nodes after
         * boot.
         */
        pgdp = pgd_offset_k(ea);
        p4dp = p4d_offset(pgdp, ea);
        pudp = pud_alloc(&init_mm, p4dp, ea);
        if (!pudp)
                return -ENOMEM;
        if (map_page_size == PUD_SIZE) {
                ptep = (pte_t *)pudp;
                goto set_the_pte;
        }
        pmdp = pmd_alloc(&init_mm, pudp, ea);
        if (!pmdp)
                return -ENOMEM;
        if (map_page_size == PMD_SIZE) {
                ptep = pmdp_ptep(pmdp);
                goto set_the_pte;
        }
        ptep = pte_alloc_kernel(pmdp, ea);
        if (!ptep)
                return -ENOMEM;

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
        asm volatile("ptesync": : :"memory");
        return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size)
{
        return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
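/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every
 * mapping in [start, end), updating huge leaf entries in place where
 * they exist, then flush the kernel TLB for the whole range.
 */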
static void radix__change_memory_range(unsigned long start, unsigned long end,
                                       unsigned long clear)
{
        unsigned long idx;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        start = ALIGN_DOWN(start, PAGE_SIZE);
        end = PAGE_ALIGN(end); // aligns up

        pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
                 start, end, clear);

        for (idx = start; idx < end; idx += PAGE_SIZE) {
                pgdp = pgd_offset_k(idx);
                p4dp = p4d_offset(pgdp, idx);
                pudp = pud_alloc(&init_mm, p4dp, idx);
                if (!pudp)
                        continue;
                if (pud_is_leaf(*pudp)) {
                        ptep = (pte_t *)pudp;
                        goto update_the_pte;
                }
                pmdp = pmd_alloc(&init_mm, pudp, idx);
                if (!pmdp)
                        continue;
                if (pmd_is_leaf(*pmdp)) {
                        ptep = pmdp_ptep(pmdp);
                        goto update_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, idx);
                if (!ptep)
                        continue;
update_the_pte:
                radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
        }

        radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
        unsigned long start, end;

        start = (unsigned long)_stext;
        end = (unsigned long)__end_rodata;

        radix__change_memory_range(start, end, _PAGE_WRITE);

        for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
                end = start + PAGE_SIZE;
                if (overlaps_interrupt_vector_text(start, end))
                        radix__change_memory_range(start, end, _PAGE_WRITE);
                else
                        break;
        }
}

void radix__mark_initmem_nx(void)
{
        unsigned long start = (unsigned long)__init_begin;
        unsigned long end = (unsigned long)__init_end;

        radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
        char buf[10];

        if (end <= start)
                return;

        string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

        pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
                exec ? " (exec)" : "");
}

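/*
 * Return the next physical address after @addr at which mapping
 * permissions must change (a text/rodata boundary under
 * CONFIG_STRICT_KERNEL_RWX), or @end if there is none. Large pages
 * must not cross such a boundary.
 */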
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
        unsigned long stext_phys;

        stext_phys = __pa_symbol(_stext);

        // Relocatable kernel running at non-zero real address
        if (stext_phys != 0) {
                // The end of interrupts code at zero is a rodata boundary
                unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
                if (addr < end_intr)
                        return end_intr;

                // Start of relocated kernel text is a rodata boundary
                if (addr < stext_phys)
                        return stext_phys;
        }

        if (addr < __pa_symbol(__srwx_boundary))
                return __pa_symbol(__srwx_boundary);
#endif
        return end;
}

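/*
 * Map the physical range [start, end) into the kernel linear mapping,
 * using the largest page size (1G/2M/base) that the alignment, the
 * next permission boundary and radix_mem_block_size allow. Ranges
 * overlapping kernel or interrupt-vector text are mapped executable.
 */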
static int __meminit create_physical_mapping(unsigned long start,
                                             unsigned long end,
                                             int nid, pgprot_t _prot)
{
        unsigned long vaddr, addr, mapping_size = 0;
        bool prev_exec, exec = false;
        pgprot_t prot;
        int psize;
        unsigned long max_mapping_size = radix_mem_block_size;

        if (debug_pagealloc_enabled_or_kfence())
                max_mapping_size = PAGE_SIZE;

        start = ALIGN(start, PAGE_SIZE);
        end   = ALIGN_DOWN(end, PAGE_SIZE);
        for (addr = start; addr < end; addr += mapping_size) {
                unsigned long gap, previous_size;
                int rc;

                gap = next_boundary(addr, end) - addr;
                if (gap > max_mapping_size)
                        gap = max_mapping_size;
                previous_size = mapping_size;
                prev_exec = exec;

                if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
                    mmu_psize_defs[MMU_PAGE_1G].shift) {
                        mapping_size = PUD_SIZE;
                        psize = MMU_PAGE_1G;
                } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
                           mmu_psize_defs[MMU_PAGE_2M].shift) {
                        mapping_size = PMD_SIZE;
                        psize = MMU_PAGE_2M;
                } else {
                        mapping_size = PAGE_SIZE;
                        psize = mmu_virtual_psize;
                }

                vaddr = (unsigned long)__va(addr);

                if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
                    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
                        prot = PAGE_KERNEL_X;
                        exec = true;
                } else {
                        prot = _prot;
                        exec = false;
                }

                if (mapping_size != previous_size || exec != prev_exec) {
                        print_mapping(start, addr, previous_size, prev_exec);
                        start = addr;
                }

                rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
                if (rc)
                        return rc;

                update_page_count(psize, 1);
        }

        print_mapping(start, addr, mapping_size, exec);
        return 0;
}

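/*
 * Set up the kernel radix page tables: create the linear mapping for
 * all of memory, allocate the process table, and carve out the guard
 * PID for init_mm.
 */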
static void __init radix_init_pgtable(void)
{
        unsigned long rts_field;
        phys_addr_t start, end;
        u64 i;

        /* We don't support slb for radix */
        slb_set_size(0);

        /*
         * Create the linear mapping
         */
        for_each_mem_range(i, &start, &end) {
                /*
                 * The memblock allocator is up at this point, so the
                 * page tables will be allocated within the range. No
                 * need for a node (which we don't have yet).
                 */

                if (end >= RADIX_VMALLOC_START) {
                        pr_warn("Outside the supported range\n");
                        continue;
                }

                WARN_ON(create_physical_mapping(start, end,
                                                -1, PAGE_KERNEL));
        }

        if (!cpu_has_feature(CPU_FTR_HVMODE) &&
                        cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
                /*
                 * Older versions of KVM on these machines prefer that the
                 * guest only use the low 19 PID bits.
                 */
                mmu_pid_bits = 19;
        }
        mmu_base_pid = 1;

        /*
         * Allocate Partition table and process table for the
         * host.
         */
        BUG_ON(PRTB_SIZE_SHIFT > 36);
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
        /*
         * Fill in the process table.
         */
        rts_field = radix__get_tree_size();
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

        /*
         * The init_mm context is given the first available (non-zero) PID,
         * which is the "guard PID" and contains no page table. PIDR should
         * never be set to zero because that duplicates the kernel address
         * space at the 0x0... offset (quadrant 0)!
         *
         * An arbitrary PID that may later be allocated by the PID allocator
         * for userspace processes must not be used either, because that
         * would cause stale user mappings for that PID on CPUs outside of
         * the TLB invalidation scheme (because it won't be in mm_cpumask).
         *
         * So permanently carve out one PID for the purpose of a guard PID.
         */
        init_mm.context.id = mmu_base_pid;
        mmu_base_pid++;
}

static void __init radix_init_partition_table(void)
{
        unsigned long rts_field, dw0, dw1;

        mmu_partition_table_init();
        rts_field = radix__get_tree_size();
        dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
        dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
        mmu_partition_table_set_entry(0, dw0, dw1, false);

        pr_info("Initializing Radix MMU\n");
}

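/*
 * Translate a page size shift from the device tree into the
 * corresponding MMU_PAGE_* index, or -1 if the size is unsupported.
 */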
static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x15:
                idx = MMU_PAGE_2M;
                break;
        case 0x1e:
                idx = MMU_PAGE_1G;
                break;
        }
        return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
                                           const char *uname, int depth,
                                           void *data)
{
        int size = 0;
        int shift, idx;
        unsigned int ap;
        const __be32 *prop;
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        /* Grab page size encodings */
        prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
        if (!prop)
                return 0;

        pr_info("Page sizes from device-tree:\n");
        for (; size >= 4; size -= 4, ++prop) {
                struct mmu_psize_def *def;

                /* top 3 bits are the AP encoding */
                shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
                ap = be32_to_cpu(prop[0]) >> 29;
                pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

                idx = get_idx_from_shift(shift);
                if (idx < 0)
                        continue;

                def = &mmu_psize_defs[idx];
                def->shift = shift;
                def->ap  = ap;
                def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
        }

        /* needed ? */
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
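/*
 * Read the memory block size from the ibm,lmb-size property of the
 * ibm,dynamic-reconfiguration-memory node, falling back to
 * MIN_MEMORY_BLOCK_SIZE when the device tree has nothing to offer.
 */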
static int __init probe_memory_block_size(unsigned long node, const char *uname, int
                                          depth, void *data)
{
        unsigned long *mem_block_size = (unsigned long *)data;
        const __be32 *prop;
        int len;

        if (depth != 1)
                return 0;

        if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);

        if (!prop || len < dt_root_size_cells * sizeof(__be32))
                /*
                 * Nothing in the device tree
                 */
                *mem_block_size = MIN_MEMORY_BLOCK_SIZE;
        else
                *mem_block_size = of_read_number(prop, dt_root_size_cells);
        return 1;
}

static unsigned long __init radix_memory_block_size(void)
{
        unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

        /*
         * The OPAL firmware feature flag is set by now, so it is
         * safe to test for it here.
         */
        if (firmware_has_feature(FW_FEATURE_OPAL))
                mem_block_size = 1UL * 1024 * 1024 * 1024;
        else
                of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

        return mem_block_size;
}

#else   /* CONFIG_MEMORY_HOTPLUG */

static unsigned long __init radix_memory_block_size(void)
{
        return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init radix__early_init_devtree(void)
{
        int rc;

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
        if (!rc) {
                /*
                 * No page size details found in device tree.
                 * Let's assume we have page 4k and 64k support
                 */
                mmu_psize_defs[MMU_PAGE_4K].shift = 12;
                mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
                mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
                        psize_to_rpti_pgsize(MMU_PAGE_4K);

                mmu_psize_defs[MMU_PAGE_64K].shift = 16;
                mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
                mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
                        psize_to_rpti_pgsize(MMU_PAGE_64K);
        }

        /*
         * Max mapping size used when mapping pages. We don't use
         * ppc_md.memory_block_size() here because this gets called
         * early, before the machine has been probed. Also, the
         * pseries implementation only checks for ibm,lmb-size, and
         * all hypervisors supporting radix expose that device tree
         * node.
         */
        radix_mem_block_size = radix_memory_block_size();
        return;
}

void __init radix__early_init_mmu(void)
{
        unsigned long lpcr;

#ifdef CONFIG_PPC_64S_HASH_MMU
#ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
        mmu_virtual_psize = MMU_PAGE_64K;
#else
        mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* vmemmap mapping */
        if (mmu_psize_defs[MMU_PAGE_2M].shift) {
                /*
                 * map vmemmap using 2M if available
                 */
                mmu_vmemmap_psize = MMU_PAGE_2M;
        } else
                mmu_vmemmap_psize = mmu_virtual_psize;
#endif
#endif
        /*
         * initialize page table size
         */
        __pte_index_size = RADIX_PTE_INDEX_SIZE;
        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
        __pud_index_size = RADIX_PUD_INDEX_SIZE;
        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
        __pud_cache_index = RADIX_PUD_INDEX_SIZE;
        __pte_table_size = RADIX_PTE_TABLE_SIZE;
        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
        __pud_table_size = RADIX_PUD_TABLE_SIZE;
        __pgd_table_size = RADIX_PGD_TABLE_SIZE;

        __pmd_val_bits = RADIX_PMD_VAL_BITS;
        __pud_val_bits = RADIX_PUD_VAL_BITS;
        __pgd_val_bits = RADIX_PGD_VAL_BITS;

        __kernel_virt_start = RADIX_KERN_VIRT_START;
        __vmalloc_start = RADIX_VMALLOC_START;
        __vmalloc_end = RADIX_VMALLOC_END;
        __kernel_io_start = RADIX_KERN_IO_START;
        __kernel_io_end = RADIX_KERN_IO_END;
        vmemmap = (struct page *)RADIX_VMEMMAP_START;
        ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
        pci_io_base = ISA_IO_BASE;
#endif
        __pte_frag_nr = RADIX_PTE_FRAG_NR;
        __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
        __pmd_frag_nr = RADIX_PMD_FRAG_NR;
        __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

        radix_init_pgtable();

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
                radix_init_partition_table();
        } else {
                radix_init_pseries();
        }

        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

        /* Switch to the guard PID before turning on MMU */
        radix__switch_mmu_context(NULL, &init_mm);
        tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
        unsigned long lpcr;
        /*
         * update partition table control register and UPRT
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

                set_ptcr_when_no_uv(__pa(partition_tb) |
                                    (PATB_SIZE_SHIFT - 12));
        }

        radix__switch_mmu_context(NULL, &init_mm);
        tlbiel_all();

        /* Make sure userspace can't change the AMR */
        mtspr(SPRN_UAMOR, 0);
}

/* Called during kexec sequence with MMU off */
notrace void radix__mmu_cleanup_all(void)
{
        unsigned long lpcr;

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
                set_ptcr_when_no_uv(0);
                powernv_set_nmmu_ptcr(0);
                radix__flush_tlb_all();
        }
}

#ifdef CONFIG_MEMORY_HOTPLUG
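/*
 * The free_{pte,pmd,pud}_table() helpers below free a page table page
 * and clear the parent entry, but only once every slot in the page is
 * none; a partially used table is left in place.
 */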
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        pte_free_kernel(&init_mm, pte_start);
        pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (!pmd_none(*pmd))
                        return;
        }

        pmd_free(&init_mm, pmd_start);
        pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (!pud_none(*pud))
                        return;
        }

        pud_free(&init_mm, pud_start);
        p4d_clear(p4d);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
        unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

        return !vmemmap_populated(start, PMD_SIZE);
}

static bool __meminit vmemmap_page_is_unused(unsigned long addr, unsigned long end)
{
        unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);

        return !vmemmap_populated(start, PAGE_SIZE);
}
#endif

static void __meminit free_vmemmap_pages(struct page *page,
                                         struct vmem_altmap *altmap,
                                         int order)
{
        unsigned int nr_pages = 1 << order;

        if (altmap) {
                unsigned long alt_start, alt_end;
                unsigned long base_pfn = page_to_pfn(page);

                /*
                 * With 2M vmemmap mappings we can have things set up
                 * such that, even though an altmap is specified, we
                 * never actually used the altmap for this page.
                 */
                alt_start = altmap->base_pfn;
                alt_end = altmap->base_pfn + altmap->reserve + altmap->free;

                if (base_pfn >= alt_start && base_pfn < alt_end) {
                        vmem_altmap_free(altmap, nr_pages);
                        return;
                }
        }

        if (PageReserved(page)) {
                /* allocated from memblock */
                while (nr_pages--)
                        free_reserved_page(page++);
        } else
                free_pages((unsigned long)page_address(page), order);
}

static void __meminit remove_pte_table(pte_t *pte_start, unsigned long addr,
                                       unsigned long end, bool direct,
                                       struct vmem_altmap *altmap)
{
        unsigned long next, pages = 0;
        pte_t *pte;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
                        if (!direct)
                                free_vmemmap_pages(pte_page(*pte), altmap, 0);
                        pte_clear(&init_mm, addr, pte);
                        pages++;
                }
#ifdef CONFIG_SPARSEMEM_VMEMMAP
                else if (!direct && vmemmap_page_is_unused(addr, next)) {
                        free_vmemmap_pages(pte_page(*pte), altmap, 0);
                        pte_clear(&init_mm, addr, pte);
                }
#endif
        }
        if (direct)
                update_page_count(mmu_virtual_psize, -pages);
}

static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
                                       unsigned long end, bool direct,
                                       struct vmem_altmap *altmap)
{
        unsigned long next, pages = 0;
        pte_t *pte_base;
        pmd_t *pmd;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_is_leaf(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
                                        free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
                                pte_clear(&init_mm, addr, (pte_t *)pmd);
                                pages++;
                        }
#ifdef CONFIG_SPARSEMEM_VMEMMAP
                        else if (!direct && vmemmap_pmd_is_unused(addr, next)) {
                                free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
                                pte_clear(&init_mm, addr, (pte_t *)pmd);
                        }
#endif
                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next, direct, altmap);
                free_pte_table(pte_base, pmd);
        }
        if (direct)
                update_page_count(MMU_PAGE_2M, -pages);
}

static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
                                       unsigned long end, bool direct,
                                       struct vmem_altmap *altmap)
{
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
        pud_t *pud;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_is_leaf(*pud)) {
                        if (!IS_ALIGNED(addr, PUD_SIZE) ||
                            !IS_ALIGNED(next, PUD_SIZE)) {
                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
                                continue;
                        }
                        pte_clear(&init_mm, addr, (pte_t *)pud);
                        pages++;
                        continue;
                }

                pmd_base = pud_pgtable(*pud);
                remove_pmd_table(pmd_base, addr, next, direct, altmap);
                free_pmd_table(pmd_base, pud);
        }
        if (direct)
                update_page_count(MMU_PAGE_1G, -pages);
}

static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
                 struct vmem_altmap *altmap)
{
        unsigned long addr, next;
        pud_t *pud_base;
        pgd_t *pgd;
        p4d_t *p4d;

        spin_lock(&init_mm.page_table_lock);

        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                p4d = p4d_offset(pgd, addr);
                if (!p4d_present(*p4d))
                        continue;

                if (p4d_is_leaf(*p4d)) {
                        if (!IS_ALIGNED(addr, P4D_SIZE) ||
                            !IS_ALIGNED(next, P4D_SIZE)) {
                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
                                continue;
                        }

                        pte_clear(&init_mm, addr, (pte_t *)pgd);
                        continue;
                }

                pud_base = p4d_pgtable(*p4d);
                remove_pud_table(pud_base, addr, next, direct, altmap);
                free_pud_table(pud_base, p4d);
        }

        spin_unlock(&init_mm.page_table_lock);
        radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start,
                                            unsigned long end, int nid,
                                            pgprot_t prot)
{
        if (end >= RADIX_VMALLOC_START) {
                pr_warn("Outside the supported range\n");
                return -1;
        }

        return create_physical_mapping(__pa(start), __pa(end),
                                       nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
        remove_pagetable(start, end, true, NULL);
        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int map_page_size,
                                 int nid)
{
        return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
                                      unsigned long page_size,
                                      unsigned long phys)
{
        /* Create a PTE encoding */
        int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
        int ret;

        if ((start + page_size) >= RADIX_VMEMMAP_END) {
                pr_warn("Outside the supported range\n");
                return -1;
        }

        ret = __map_kernel_page_nid(start, phys, PAGE_KERNEL, page_size, nid);
        BUG_ON(ret);

        return 0;
}

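/*
 * Radix can use the generic HugeTLB-style vmemmap optimization, which
 * maps the repeated tail struct pages of a compound device page onto a
 * single physical page; hash does not support it.
 */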
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
{
        if (radix_enabled())
                return __vmemmap_can_optimize(altmap, pgmap);

        return false;
}

int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
                                unsigned long addr, unsigned long next)
{
        int large = pmd_large(*pmdp);

        if (large)
                vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);

        return large;
}

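/*
 * Install a PMD-level leaf entry for the vmemmap, backed by the block
 * at @p, and verify that the mapping ends up on the expected node.
 */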
void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
                               unsigned long addr, unsigned long next)
{
        pte_t entry;
        pte_t *ptep = pmdp_ptep(pmdp);

        VM_BUG_ON(!IS_ALIGNED(addr, PMD_SIZE));
        entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
        set_pte_at(&init_mm, addr, ptep, entry);
        asm volatile("ptesync": : :"memory");

        vmemmap_verify(ptep, node, addr, next);
}

static pte_t * __meminit radix__vmemmap_pte_populate(pmd_t *pmdp, unsigned long addr,
                                                     int node,
                                                     struct vmem_altmap *altmap,
                                                     struct page *reuse)
{
        pte_t *pte = pte_offset_kernel(pmdp, addr);

        if (pte_none(*pte)) {
                pte_t entry;
                void *p;

                if (!reuse) {
                        /*
                         * make sure we don't create altmap mappings
                         * covering things outside the device.
                         */
                        if (altmap && altmap_cross_boundary(altmap, addr, PAGE_SIZE))
                                altmap = NULL;

                        p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
                        if (!p && altmap)
                                p = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL);
                        if (!p)
                                return NULL;
                } else {
                        /*
                         * When a PTE/PMD entry is freed from the init_mm
                         * there's a free_pages() call to this page allocated
                         * above. Thus this get_page() is paired with the
                         * put_page_testzero() on the freeing path.
                         * This can only be called through certain ZONE_DEVICE
                         * paths, and through vmemmap_populate_compound_pages()
                         * when slab is available.
                         */
                        get_page(reuse);
                        p = page_to_virt(reuse);
                }

                VM_BUG_ON(!PAGE_ALIGNED(addr));
                entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
                asm volatile("ptesync": : :"memory");
        }
        return pte;
}

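/*
 * The vmemmap_{pud,pmd,pte}_alloc() helpers below work both before and
 * after slab is up: early in boot they take page table pages from
 * memblock, later they use the regular kernel page table allocators.
 */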
static inline pud_t *vmemmap_pud_alloc(p4d_t *p4dp, int node,
                                       unsigned long address)
{
        pud_t *pud;

        /* To keep it simple, early vmemmap page tables are allocated at PAGE_SIZE */
        if (unlikely(p4d_none(*p4dp))) {
                if (unlikely(!slab_is_available())) {
                        pud = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
                        p4d_populate(&init_mm, p4dp, pud);
                        /* go to the pud_offset */
                } else
                        return pud_alloc(&init_mm, p4dp, address);
        }
        return pud_offset(p4dp, address);
}

static inline pmd_t *vmemmap_pmd_alloc(pud_t *pudp, int node,
                                       unsigned long address)
{
        pmd_t *pmd;

        /* To keep it simple, early vmemmap page tables are allocated at PAGE_SIZE */
        if (unlikely(pud_none(*pudp))) {
                if (unlikely(!slab_is_available())) {
                        pmd = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
                        pud_populate(&init_mm, pudp, pmd);
                } else
                        return pmd_alloc(&init_mm, pudp, address);
        }
        return pmd_offset(pudp, address);
}

static inline pte_t *vmemmap_pte_alloc(pmd_t *pmdp, int node,
                                       unsigned long address)
{
        pte_t *pte;

        /* To keep it simple, early vmemmap page tables are allocated at PAGE_SIZE */
        if (unlikely(pmd_none(*pmdp))) {
                if (unlikely(!slab_is_available())) {
                        pte = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
                        pmd_populate(&init_mm, pmdp, pte);
                } else
                        return pte_alloc_kernel(pmdp, address);
        }
        return pte_offset_kernel(pmdp, address);
}

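/*
 * Populate the vmemmap for [start, end), preferring 2M PMD leaf
 * mappings and falling back to base pages when alignment or altmap
 * space does not allow a huge mapping.
 */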
int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
                                      struct vmem_altmap *altmap)
{
        unsigned long addr;
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (addr = start; addr < end; addr = next) {
                next = pmd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                p4d = p4d_offset(pgd, addr);
                pud = vmemmap_pud_alloc(p4d, node, addr);
                if (!pud)
                        return -ENOMEM;
                pmd = vmemmap_pmd_alloc(pud, node, addr);
                if (!pmd)
                        return -ENOMEM;

                if (pmd_none(READ_ONCE(*pmd))) {
                        void *p;

                        /*
                         * Keep it simple: check addr for PMD_SIZE alignment
                         * and verify the device boundary condition. To use
                         * a pmd mapping, both addr and pfn must be aligned.
                         * We skip if addr is not aligned, and for the pfn we
                         * hope the extra area in the altmap helps us find an
                         * aligned block. This can result in altmap block
                         * allocation failures, in which case we fall back to
                         * RAM for the vmemmap allocation.
                         */
                        if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
                                       altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
                                /*
                                 * make sure we don't create altmap mappings
                                 * covering things outside the device.
                                 */
                                goto base_mapping;
                        }

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
                        if (p) {
                                vmemmap_set_pmd(pmd, p, node, addr, next);
                                continue;
                        } else if (altmap) {
                                /*
                                 * A vmemmap block allocation can fail because
                                 * of alignment requirements: by aligning
                                 * things aggressively we can run out of
                                 * altmap space. Try a base mapping on
                                 * failure.
                                 */
                                goto base_mapping;
                        }
                } else if (vmemmap_check_pmd(pmd, node, addr, next)) {
                        /*
                         * If a huge mapping exists due to an early call to
                         * vmemmap_populate, let's try to use that.
                         */
                        continue;
                }
base_mapping:
                /*
                 * We were not able to allocate higher-order memory to back
                 * the memmap, or we found a pointer to an existing pte
                 * page. Allocate a base page size vmemmap.
                 */
                pte = vmemmap_pte_alloc(pmd, node, addr);
                if (!pte)
                        return -ENOMEM;

                pte = radix__vmemmap_pte_populate(pmd, addr, node, altmap, NULL);
                if (!pte)
                        return -ENOMEM;

                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
                next = addr + PAGE_SIZE;
        }
        return 0;
}

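/*
 * Map a single vmemmap page at @addr with a base page size PTE,
 * returning the PTE, or NULL on allocation failure or when a huge
 * mapping already covers the address.
 */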
static pte_t * __meminit radix__vmemmap_populate_address(unsigned long addr, int node,
                                                         struct vmem_altmap *altmap,
                                                         struct page *reuse)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(addr);
        p4d = p4d_offset(pgd, addr);
        pud = vmemmap_pud_alloc(p4d, node, addr);
        if (!pud)
                return NULL;
        pmd = vmemmap_pmd_alloc(pud, node, addr);
        if (!pmd)
                return NULL;
        if (pmd_leaf(*pmd))
                /*
                 * The second page is mapped as a hugepage due to a nearby request.
                 * Force our mapping to page size without deduplication.
                 */
                return NULL;
        pte = vmemmap_pte_alloc(pmd, node, addr);
        if (!pte)
                return NULL;
        radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
        vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

        return pte;
}

static pte_t * __meminit vmemmap_compound_tail_page(unsigned long addr,
                                                    unsigned long pfn_offset, int node)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long map_addr;

        /* the second vmemmap page which we use for duplication */
        map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE;
        pgd = pgd_offset_k(map_addr);
        p4d = p4d_offset(pgd, map_addr);
        pud = vmemmap_pud_alloc(p4d, node, map_addr);
        if (!pud)
                return NULL;
        pmd = vmemmap_pmd_alloc(pud, node, map_addr);
        if (!pmd)
                return NULL;
        if (pmd_leaf(*pmd))
                /*
                 * The second page is mapped as a hugepage due to a nearby request.
                 * Force our mapping to page size without deduplication.
                 */
                return NULL;
        pte = vmemmap_pte_alloc(pmd, node, map_addr);
        if (!pte)
                return NULL;
        /*
         * Check if there already exists a mapping to the left
         */
        if (pte_none(*pte)) {
                /*
                 * Populate the head page vmemmap page.
                 * It can fall in a different pmd, hence
                 * radix__vmemmap_populate_address()
                 */
                pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL);
                if (!pte)
                        return NULL;
                /*
                 * Populate the tail pages vmemmap page
                 */
                pte = radix__vmemmap_pte_populate(pmd, map_addr, node, NULL, NULL);
                if (!pte)
                        return NULL;
                vmemmap_verify(pte, node, map_addr, map_addr + PAGE_SIZE);
                return pte;
        }
        return pte;
}

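/*
 * Populate the vmemmap for a compound device page with base pages so
 * that the tail struct pages can be deduplicated: each compound page
 * gets a head vmemmap page plus one tail vmemmap page, and every other
 * tail mapping reuses that single tail page.
 */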
int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
                                              unsigned long start,
                                              unsigned long end, int node,
                                              struct dev_pagemap *pgmap)
{
        /*
         * We want to map things with base page size mappings so that
         * we can save space in the vmemmap. Huge mappings created
         * earlier may already cover both edges of the range.
         */
        unsigned long addr;
        unsigned long addr_pfn = start_pfn;
        unsigned long next;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (addr = start; addr < end; addr = next) {

                pgd = pgd_offset_k(addr);
                p4d = p4d_offset(pgd, addr);
                pud = vmemmap_pud_alloc(p4d, node, addr);
                if (!pud)
                        return -ENOMEM;
                pmd = vmemmap_pmd_alloc(pud, node, addr);
                if (!pmd)
                        return -ENOMEM;

                if (pmd_leaf(READ_ONCE(*pmd))) {
                        /* existing huge mapping. Skip the range */
                        addr_pfn += (PMD_SIZE >> PAGE_SHIFT);
                        next = pmd_addr_end(addr, end);
                        continue;
                }
                pte = vmemmap_pte_alloc(pmd, node, addr);
                if (!pte)
                        return -ENOMEM;
                if (!pte_none(*pte)) {
                        /*
                         * This could be because we already have a compound
                         * page whose VMEMMAP_RESERVE_NR pages were mapped
                         * and this request falls within those pages.
                         */
                        addr_pfn += 1;
                        next = addr + PAGE_SIZE;
                        continue;
                } else {
                        unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
                        unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages);
                        pte_t *tail_page_pte;

                        /*
                         * If the address is aligned to the huge page size,
                         * it is the head mapping.
                         */
                        if (pfn_offset == 0) {
                                /* Populate the head page vmemmap page */
                                pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
                                if (!pte)
                                        return -ENOMEM;
                                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

                                /*
                                 * Populate the tail pages vmemmap page.
                                 * It can fall in a different pmd, hence
                                 * radix__vmemmap_populate_address()
                                 */
                                pte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL);
                                if (!pte)
                                        return -ENOMEM;

                                addr_pfn += 2;
                                next = addr + 2 * PAGE_SIZE;
                                continue;
                        }
                        /*
                         * Get the tail (second) vmemmap page,
                         * creating it if it doesn't already exist.
                         */
                        tail_page_pte = vmemmap_compound_tail_page(addr, pfn_offset, node);
                        if (!tail_page_pte) {

                                pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
                                if (!pte)
                                        return -ENOMEM;
                                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

                                addr_pfn += 1;
                                next = addr + PAGE_SIZE;
                                continue;
                        }

                        pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, pte_page(*tail_page_pte));
                        if (!pte)
                                return -ENOMEM;
                        vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

                        addr_pfn += 1;
                        next = addr + PAGE_SIZE;
                        continue;
                }
        }
        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
        remove_pagetable(start, start + page_size, true, NULL);
}

void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
                               struct vmem_altmap *altmap)
{
        remove_pagetable(start, end, false, altmap);
}
#endif
#endif

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
void radix__kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long addr;

        addr = (unsigned long)page_address(page);

        if (enable)
                set_memory_p(addr, numpages);
        else
                set_memory_np(addr, numpages);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

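/*
 * The radix__{pmd,pud}_hugepage_update() helpers below update a huge
 * PMD/PUD entry under the page table lock and return the old value;
 * they back the huge get-and-clear operations further down.
 */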
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                  pmd_t *pmdp, unsigned long clr,
                                  unsigned long set)
{
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

        old = radix__pte_update(mm, addr, pmdp_ptep(pmdp), clr, set, 1);
        trace_hugepage_update_pmd(addr, old, clr, set);

        return old;
}

unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                         pud_t *pudp, unsigned long clr,
                                         unsigned long set)
{
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pud_devmap(*pudp));
        assert_spin_locked(pud_lockptr(mm, pudp));
#endif

        old = radix__pte_update(mm, addr, pudp_ptep(pudp), clr, set, 1);
        trace_hugepage_update_pud(addr, old, clr, set);

        return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                        pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
        VM_BUG_ON(pmd_devmap(*pmdp));
        /*
         * khugepaged calls this for normal pmd
         */
        pmd = *pmdp;
        pmd_clear(pmdp);

        radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

        return pmd;
}

1483 /*
1484  * For us pgtable_t is pte_t *. Inorder to save the deposisted
1485  * page table, we consider the allocated page table as a list
1486  * head. On withdraw we need to make sure we zero out the used
1487  * list_head memory area.
1488  */
1489 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1490                                  pgtable_t pgtable)
1491 {
1492         struct list_head *lh = (struct list_head *) pgtable;
1493
1494         assert_spin_locked(pmd_lockptr(mm, pmdp));
1495
1496         /* FIFO */
1497         if (!pmd_huge_pte(mm, pmdp))
1498                 INIT_LIST_HEAD(lh);
1499         else
1500                 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1501         pmd_huge_pte(mm, pmdp) = pgtable;
1502 }

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pte_t *ptep;
        pgtable_t pgtable;
        struct list_head *lh;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        *ptep = __pte(0);
        ptep++;
        *ptep = __pte(0);
        return pgtable;
}
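/*
 * Bookkeeping sketch for the deposit/withdraw pair above:
 * pmd_huge_pte() tracks the most recently deposited table, and the
 * first two pointer-sized slots of each deposited table are reused as
 * a struct list_head linking further deposits together. That overlay
 * is why withdraw writes __pte(0) into the first two PTE slots before
 * handing the table back: the list_head contents must not leak into
 * the page table once it is reused.
 */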

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        unsigned long old;

        old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        return old_pmd;
}

pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm,
                                     unsigned long addr, pud_t *pudp)
{
        pud_t old_pud;
        unsigned long old;

        old = radix__pud_hugepage_update(mm, addr, pudp, ~0UL, 0);
        old_pud = __pud(old);
        return old_pud;
}
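/*
 * Both get-and-clear helpers are thin wrappers: passing clr = ~0UL
 * and set = 0 zeroes the entry while returning its previous contents,
 * which callers (e.g. the THP zap/split paths) then inspect for
 * dirty/accessed state.
 */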

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                                  pte_t entry, unsigned long address, int psize)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
                                              _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

        unsigned long change = pte_val(entry) ^ pte_val(*ptep);
        /*
         * On POWER9, the NMMU is not able to relax PTE access permissions
         * for a translation it has cached in its TLB. The PTE must be
         * invalidated and the TLB flushed before the new PTE is installed.
         *
         * This only needs to be done for radix, because hash translation
         * already flushes when updating the linux pte (and we don't support
         * NMMU accelerators on HPT on POWER9 anyway XXX: do we?).
         *
         * POWER10 (and P9P) NMMU does behave as per ISA.
         */
        if (!cpu_has_feature(CPU_FTR_ARCH_31) && (change & _PAGE_RW) &&
            atomic_read(&mm->context.copros) > 0) {
                unsigned long old_pte, new_pte;

                old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
                new_pte = old_pte | set;
                radix__flush_tlb_page_psize(mm, address, psize);
                __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
        } else {
                __radix_pte_update(ptep, 0, set);
                /*
                 * Book3S does not require a TLB flush when relaxing access
                 * restrictions (modulo the POWER9 nest MMU issue above),
                 * because the MMU will reload the PTE after taking an access
                 * fault, as defined by the architecture. See "Setting a
                 * Reference or Change Bit or Upgrading Access Authority
                 * (PTE Subject to Atomic Hardware Updates)" in Power ISA
                 * Version 3.1B.
                 */
        }
        /* See ptesync comment in radix__set_pte_at */
}
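/*
 * Worked example of the nest-MMU path above, for a read-only PTE
 * being upgraded to read-write while a coprocessor context is
 * attached (illustrative, with set containing _PAGE_RW):
 *
 *      old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
 *                                              // 1. invalidate the PTE
 *      radix__flush_tlb_page_psize(mm, address, psize);
 *                                              // 2. flush the stale entry
 *      __radix_pte_update(ptep, _PAGE_INVALID, old_pte | _PAGE_RW);
 *                                              // 3. install relaxed PTE
 *
 * Step 2 must complete before step 3; otherwise the NMMU could keep
 * using its stale read-only translation after the RW PTE is visible.
 */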

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep,
                                    pte_t old_pte, pte_t pte)
{
        struct mm_struct *mm = vma->vm_mm;

        /*
         * POWER9 NMMU must flush the TLB after clearing the PTE before
         * installing a PTE with more relaxed access permissions, see
         * radix__ptep_set_access_flags.
         */
        if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
            is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
            (atomic_read(&mm->context.copros) > 0))
                radix__flush_tlb_page(vma, addr);

        set_pte_at(mm, addr, ptep, pte);
}
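/*
 * This is the commit half of the ptep_modify_prot_start()/commit()
 * pair: start clears the PTE and returns the old value, the caller
 * computes the new PTE, and commit (above) installs it, inserting the
 * extra nest-MMU flush on POWER9 when write permission is added.
 */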

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        pte_t *ptep = (pte_t *)pud;
        pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

        if (!radix_enabled())
                return 0;

        set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

        return 1;
}
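/*
 * pud_set_huge()/pmd_set_huge() back the generic huge-vmap/ioremap
 * code: a suitably aligned and sized region can be mapped by a single
 * leaf at the PUD (1G on radix) or PMD (2M on radix) level instead of
 * a full tree of smaller entries. A minimal caller sketch, assuming a
 * 1G-aligned physical address phys (hypothetical variable):
 *
 *      if (pud_set_huge(pud, phys, PAGE_KERNEL))
 *              return 0;       // mapped with one 1G leaf entry
 *      // otherwise fall back to PMD/PTE granularity
 */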

int pud_clear_huge(pud_t *pud)
{
        if (pud_is_leaf(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd;
        int i;

        pmd = pud_pgtable(*pud);
        pud_clear(pud);

        flush_tlb_kernel_range(addr, addr + PUD_SIZE);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd[i])) {
                        pte_t *pte;

                        pte = (pte_t *)pmd_page_vaddr(pmd[i]);

                        pte_free_kernel(&init_mm, pte);
                }
        }

        pmd_free(&init_mm, pmd);

        return 1;
}
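/*
 * Teardown counterpart for the huge-vmap code: before a huge leaf can
 * replace an existing page-table directory, the old sub-tree must be
 * torn down. The function above detaches the PMD directory, flushes
 * the covered kernel range, frees any PTE tables still referenced by
 * its PTRS_PER_PMD entries, then frees the PMD directory itself;
 * pmd_free_pte_page() below does the same one level down.
 */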

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        pte_t *ptep = (pte_t *)pmd;
        pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

        if (!radix_enabled())
                return 0;

        set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

        return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_is_leaf(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;

        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);

        flush_tlb_kernel_range(addr, addr + PMD_SIZE);

        pte_free_kernel(&init_mm, pte);

        return 1;
}