1 // SPDX-License-Identifier: GPL-2.0
3 * arch/sparc64/mm/init.c
5 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 #include <linux/extable.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/string.h>
13 #include <linux/init.h>
14 #include <linux/memblock.h>
16 #include <linux/hugetlb.h>
17 #include <linux/initrd.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/poison.h>
22 #include <linux/seq_file.h>
23 #include <linux/kprobes.h>
24 #include <linux/cache.h>
25 #include <linux/sort.h>
26 #include <linux/ioport.h>
27 #include <linux/percpu.h>
28 #include <linux/mmzone.h>
29 #include <linux/gfp.h>
33 #include <asm/pgalloc.h>
34 #include <asm/oplib.h>
35 #include <asm/iommu.h>
37 #include <linux/uaccess.h>
38 #include <asm/mmu_context.h>
39 #include <asm/tlbflush.h>
41 #include <asm/starfire.h>
43 #include <asm/spitfire.h>
44 #include <asm/sections.h>
46 #include <asm/hypervisor.h>
48 #include <asm/mdesc.h>
49 #include <asm/cpudata.h>
50 #include <asm/setup.h>
55 unsigned long kern_linear_pte_xor[4] __read_mostly;
56 static unsigned long page_cache4v_flag;
58 /* A bitmap, two bits for every 256MB of physical memory. These two
59 * bits determine what page size we use for kernel linear
60 * translations. They form an index into kern_linear_pte_xor[]. The
61 * value in the indexed slot is XOR'd with the TLB miss virtual
62 * address to form the resulting TTE. The mapping is:
69 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
70 * support 2GB pages, and hopefully future cpus will support the 16GB
71 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
72 * if these larger page sizes are not supported by the cpu.
74 * It would be nice to determine this from the machine description
75 * 'cpu' properties, but we need to have this table set up before the
76 * MDESC is initialized.
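/* A rough sketch of the scheme described above: each 256MB region of
 * physical memory contributes a two-bit value 0-3 which presumably selects
 * 4MB, 256MB, 2GB or 16GB translations respectively, and the TLB miss
 * handler then forms the TTE roughly as:
 *
 *	tte = miss_vaddr ^ kern_linear_pte_xor[val];
 *
 * since each kern_linear_pte_xor[] entry folds in the linear-map
 * displacement (PAGE_OFFSET) together with the page size and protection
 * bits.
 */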
79 #ifndef CONFIG_DEBUG_PAGEALLOC
80 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
81 * Space is allocated for this right after the trap table in
82 * arch/sparc64/kernel/head.S
84 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
86 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
88 static unsigned long cpu_pgsz_mask;
90 #define MAX_BANKS 1024
92 static struct linux_prom64_registers pavail[MAX_BANKS];
93 static int pavail_ents;
95 u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
97 static int cmp_p64(const void *a, const void *b)
99 const struct linux_prom64_registers *x = a, *y = b;
101 if (x->phys_addr > y->phys_addr)
103 if (x->phys_addr < y->phys_addr)
108 static void __init read_obp_memory(const char *property,
109 struct linux_prom64_registers *regs,
112 phandle node = prom_finddevice("/memory");
113 int prop_size = prom_getproplen(node, property);
116 ents = prop_size / sizeof(struct linux_prom64_registers);
117 if (ents > MAX_BANKS) {
118 prom_printf("The machine has more %s property entries than "
119 "this kernel can support (%d).\n",
120 property, MAX_BANKS);
124 ret = prom_getproperty(node, property, (char *) regs, prop_size);
126 prom_printf("Couldn't get %s property from /memory.\n",
131 /* Sanitize what we got from the firmware, by page aligning
134 for (i = 0; i < ents; i++) {
135 unsigned long base, size;
137 base = regs[i].phys_addr;
138 size = regs[i].reg_size;
141 if (base & ~PAGE_MASK) {
142 unsigned long new_base = PAGE_ALIGN(base);
144 size -= new_base - base;
145 if ((long) size < 0L)
150 /* If it is empty, simply get rid of it.
151 * This simplifies the logic of the other
152 * functions that process these arrays.
154 memmove(&regs[i], &regs[i + 1],
155 (ents - i - 1) * sizeof(regs[0]));
160 regs[i].phys_addr = base;
161 regs[i].reg_size = size;
166 sort(regs, ents, sizeof(struct linux_prom64_registers),
170 /* Kernel physical address base and size in bytes. */
171 unsigned long kern_base __read_mostly;
172 unsigned long kern_size __read_mostly;
174 /* Initial ramdisk setup */
175 extern unsigned long sparc_ramdisk_image64;
176 extern unsigned int sparc_ramdisk_image;
177 extern unsigned int sparc_ramdisk_size;
179 struct page *mem_map_zero __read_mostly;
180 EXPORT_SYMBOL(mem_map_zero);
182 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
184 unsigned long sparc64_kern_pri_context __read_mostly;
185 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
186 unsigned long sparc64_kern_sec_context __read_mostly;
188 int num_kernel_image_mappings;
190 #ifdef CONFIG_DEBUG_DCFLUSH
191 atomic_t dcpage_flushes = ATOMIC_INIT(0);
193 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
197 inline void flush_dcache_page_impl(struct page *page)
199 BUG_ON(tlb_type == hypervisor);
200 #ifdef CONFIG_DEBUG_DCFLUSH
201 atomic_inc(&dcpage_flushes);
204 #ifdef DCACHE_ALIASING_POSSIBLE
205 __flush_dcache_page(page_address(page),
206 ((tlb_type == spitfire) &&
207 page_mapping_file(page) != NULL));
209 if (page_mapping_file(page) != NULL &&
210 tlb_type == spitfire)
211 __flush_icache_page(__pa(page_address(page)));
215 #define PG_dcache_dirty PG_arch_1
216 #define PG_dcache_cpu_shift 32UL
217 #define PG_dcache_cpu_mask \
218 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
220 #define dcache_dirty_cpu(page) \
221 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
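/* In other words: bit PG_arch_1 (PG_dcache_dirty) in page->flags records
 * that the page has dirty D-cache lines, and the CPU that dirtied it is
 * stored just above bit 32, i.e. roughly:
 *
 *	cpu = (page->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask;
 *
 * which is exactly what the dcache_dirty_cpu() macro above expands to.
 */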
223 static inline void set_dcache_dirty(struct page *page, int this_cpu)
225 unsigned long mask = this_cpu;
226 unsigned long non_cpu_bits;
228 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
229 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
231 __asm__ __volatile__("1:\n\t"
233 "and %%g7, %1, %%g1\n\t"
234 "or %%g1, %0, %%g1\n\t"
235 "casx [%2], %%g7, %%g1\n\t"
237 "bne,pn %%xcc, 1b\n\t"
240 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
244 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
246 unsigned long mask = (1UL << PG_dcache_dirty);
248 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
251 "srlx %%g7, %4, %%g1\n\t"
252 "and %%g1, %3, %%g1\n\t"
254 "bne,pn %%icc, 2f\n\t"
255 " andn %%g7, %1, %%g1\n\t"
256 "casx [%2], %%g7, %%g1\n\t"
258 "bne,pn %%xcc, 1b\n\t"
262 : "r" (cpu), "r" (mask), "r" (&page->flags),
263 "i" (PG_dcache_cpu_mask),
264 "i" (PG_dcache_cpu_shift)
268 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
270 unsigned long tsb_addr = (unsigned long) ent;
272 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
273 tsb_addr = __pa(tsb_addr);
275 __tsb_insert(tsb_addr, tag, pte);
278 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
280 static void flush_dcache(unsigned long pfn)
284 page = pfn_to_page(pfn);
286 unsigned long pg_flags;
288 pg_flags = page->flags;
289 if (pg_flags & (1UL << PG_dcache_dirty)) {
290 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
292 int this_cpu = get_cpu();
294 /* This is just to optimize away some function calls
298 flush_dcache_page_impl(page);
300 smp_flush_dcache_page_impl(page, cpu);
302 clear_dcache_dirty_cpu(page, cpu);
309 /* mm->context.lock must be held */
310 static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
311 unsigned long tsb_hash_shift, unsigned long address,
314 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
320 tsb += ((address >> tsb_hash_shift) &
321 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
322 tag = (address >> 22UL);
323 tsb_insert(tsb, tag, tte);
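/* For example, for the base TSB (tsb_hash_shift == PAGE_SHIFT, i.e. 13 on
 * sparc64) a fault at 'address' selects entry
 * (address >> 13) & (nentries - 1) and records the tag address >> 22,
 * matching what the TSB miss handlers compare against.
 */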
326 #ifdef CONFIG_HUGETLB_PAGE
327 static int __init hugetlbpage_init(void)
329 hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
330 hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
331 hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
332 hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
337 arch_initcall(hugetlbpage_init);
339 static void __init pud_huge_patch(void)
341 struct pud_huge_patch_entry *p;
344 p = &__pud_huge_patch;
346 *(unsigned int *)addr = p->insn;
348 __asm__ __volatile__("flush %0" : : "r" (addr));
351 bool __init arch_hugetlb_valid_size(unsigned long size)
353 unsigned int hugepage_shift = ilog2(size);
354 unsigned short hv_pgsz_idx;
355 unsigned int hv_pgsz_mask;
357 switch (hugepage_shift) {
358 case HPAGE_16GB_SHIFT:
359 hv_pgsz_mask = HV_PGSZ_MASK_16GB;
360 hv_pgsz_idx = HV_PGSZ_IDX_16GB;
363 case HPAGE_2GB_SHIFT:
364 hv_pgsz_mask = HV_PGSZ_MASK_2GB;
365 hv_pgsz_idx = HV_PGSZ_IDX_2GB;
367 case HPAGE_256MB_SHIFT:
368 hv_pgsz_mask = HV_PGSZ_MASK_256MB;
369 hv_pgsz_idx = HV_PGSZ_IDX_256MB;
372 hv_pgsz_mask = HV_PGSZ_MASK_4MB;
373 hv_pgsz_idx = HV_PGSZ_IDX_4MB;
375 case HPAGE_64K_SHIFT:
376 hv_pgsz_mask = HV_PGSZ_MASK_64K;
377 hv_pgsz_idx = HV_PGSZ_IDX_64K;
383 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U)
388 #endif /* CONFIG_HUGETLB_PAGE */
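/* Illustrative call path for arch_hugetlb_valid_size() above (assuming the
 * generic hugetlb command-line handling): booting with hugepagesz=2G calls
 * it with size == 1UL << HPAGE_2GB_SHIFT, which is accepted only when
 * HV_PGSZ_MASK_2GB is present in the cpu_pgsz_mask reported by the machine
 * description.
 */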
390 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
392 struct mm_struct *mm;
397 if (tlb_type != hypervisor) {
398 unsigned long pfn = pte_pfn(pte);
406 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
407 if (!pte_accessible(mm, pte))
410 spin_lock_irqsave(&mm->context.lock, flags);
413 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
414 if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
415 unsigned long hugepage_size = PAGE_SIZE;
417 if (is_vm_hugetlb_page(vma))
418 hugepage_size = huge_page_size(hstate_vma(vma));
420 if (hugepage_size >= PUD_SIZE) {
421 unsigned long mask = 0x1ffc00000UL;
423 /* Transfer bits [32:22] from address to resolve
426 pte_val(pte) &= ~mask;
427 pte_val(pte) |= (address & mask);
428 } else if (hugepage_size >= PMD_SIZE) {
429 /* We are fabricating 8MB pages using 4MB
432 pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
435 if (hugepage_size >= PMD_SIZE) {
436 __update_mmu_tsb_insert(mm, MM_TSB_HUGE,
437 REAL_HPAGE_SHIFT, address, pte_val(pte));
443 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
444 address, pte_val(pte));
446 spin_unlock_irqrestore(&mm->context.lock, flags);
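/* Note on the PMD-sized case above: HPAGE_SIZE is 8MB but the hardware only
 * provides 4MB TTEs (REAL_HPAGE_SIZE), so each huge page is backed by two
 * 4MB translations; the (address & (1UL << REAL_HPAGE_SHIFT)) bit selects
 * which 4MB half of the 8MB page this fabricated TTE maps.
 */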
449 void flush_dcache_page(struct page *page)
451 struct address_space *mapping;
454 if (tlb_type == hypervisor)
457 /* Do not bother with the expensive D-cache flush if it
458 * is merely the zero page. The 'bigcore' testcase in GDB
459 * causes this case to run millions of times.
461 if (page == ZERO_PAGE(0))
464 this_cpu = get_cpu();
466 mapping = page_mapping_file(page);
467 if (mapping && !mapping_mapped(mapping)) {
468 int dirty = test_bit(PG_dcache_dirty, &page->flags);
470 int dirty_cpu = dcache_dirty_cpu(page);
472 if (dirty_cpu == this_cpu)
474 smp_flush_dcache_page_impl(page, dirty_cpu);
476 set_dcache_dirty(page, this_cpu);
478 /* We could delay the flush for the !page_mapping
479 * case too. But that case is for exec env/arg
480 * pages and those are 99% certain to get
481 * faulted into the tlb (and thus flushed) anyway.
483 flush_dcache_page_impl(page);
489 EXPORT_SYMBOL(flush_dcache_page);
491 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
493 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
494 if (tlb_type == spitfire) {
497 /* This code only runs on Spitfire cpus, which is
498 * why we can assume _PAGE_PADDR_4U.
500 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
501 unsigned long paddr, mask = _PAGE_PADDR_4U;
503 if (kaddr >= PAGE_OFFSET)
504 paddr = kaddr & mask;
506 pgd_t *pgdp = pgd_offset_k(kaddr);
507 p4d_t *p4dp = p4d_offset(pgdp, kaddr);
508 pud_t *pudp = pud_offset(p4dp, kaddr);
509 pmd_t *pmdp = pmd_offset(pudp, kaddr);
510 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
512 paddr = pte_val(*ptep) & mask;
514 __flush_icache_page(paddr);
518 EXPORT_SYMBOL(flush_icache_range);
520 void mmu_info(struct seq_file *m)
522 static const char *pgsz_strings[] = {
523 "8K", "64K", "512K", "4MB", "32MB",
524 "256MB", "2GB", "16GB",
528 if (tlb_type == cheetah)
529 seq_printf(m, "MMU Type\t: Cheetah\n");
530 else if (tlb_type == cheetah_plus)
531 seq_printf(m, "MMU Type\t: Cheetah+\n");
532 else if (tlb_type == spitfire)
533 seq_printf(m, "MMU Type\t: Spitfire\n");
534 else if (tlb_type == hypervisor)
535 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
537 seq_printf(m, "MMU Type\t: ???\n");
539 seq_printf(m, "MMU PGSZs\t: ");
541 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
542 if (cpu_pgsz_mask & (1UL << i)) {
543 seq_printf(m, "%s%s",
544 printed ? "," : "", pgsz_strings[i]);
550 #ifdef CONFIG_DEBUG_DCFLUSH
551 seq_printf(m, "DCPageFlushes\t: %d\n",
552 atomic_read(&dcpage_flushes));
554 seq_printf(m, "DCPageFlushesXC\t: %d\n",
555 atomic_read(&dcpage_flushes_xcall));
556 #endif /* CONFIG_SMP */
557 #endif /* CONFIG_DEBUG_DCFLUSH */
560 struct linux_prom_translation prom_trans[512] __read_mostly;
561 unsigned int prom_trans_ents __read_mostly;
563 unsigned long kern_locked_tte_data;
565 /* The OBP translations are saved based on the 8K page size, since OBP can
566 * use a mixture of page sizes. Misses to the LOW_OBP_ADDRESS ->
567 * HI_OBP_ADDRESS range are handled in ktlb.S.
569 static inline int in_obp_range(unsigned long vaddr)
571 return (vaddr >= LOW_OBP_ADDRESS &&
572 vaddr < HI_OBP_ADDRESS);
575 static int cmp_ptrans(const void *a, const void *b)
577 const struct linux_prom_translation *x = a, *y = b;
579 if (x->virt > y->virt)
581 if (x->virt < y->virt)
586 /* Read OBP translations property into 'prom_trans[]'. */
587 static void __init read_obp_translations(void)
589 int n, node, ents, first, last, i;
591 node = prom_finddevice("/virtual-memory");
592 n = prom_getproplen(node, "translations");
593 if (unlikely(n == 0 || n == -1)) {
594 prom_printf("prom_mappings: Couldn't get size.\n");
597 if (unlikely(n > sizeof(prom_trans))) {
598 prom_printf("prom_mappings: Size %d is too big.\n", n);
602 if ((n = prom_getproperty(node, "translations",
603 (char *)&prom_trans[0],
604 sizeof(prom_trans))) == -1) {
605 prom_printf("prom_mappings: Couldn't get property.\n");
609 n = n / sizeof(struct linux_prom_translation);
613 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
616 /* Now kick out all the non-OBP entries. */
617 for (i = 0; i < ents; i++) {
618 if (in_obp_range(prom_trans[i].virt))
622 for (; i < ents; i++) {
623 if (!in_obp_range(prom_trans[i].virt))
628 for (i = 0; i < (last - first); i++) {
629 struct linux_prom_translation *src = &prom_trans[i + first];
630 struct linux_prom_translation *dest = &prom_trans[i];
634 for (; i < ents; i++) {
635 struct linux_prom_translation *dest = &prom_trans[i];
636 dest->virt = dest->size = dest->data = 0x0UL;
639 prom_trans_ents = last - first;
641 if (tlb_type == spitfire) {
642 /* Clear diag TTE bits. */
643 for (i = 0; i < prom_trans_ents; i++)
644 prom_trans[i].data &= ~0x0003fe0000000000UL;
647 /* Force execute bit on. */
648 for (i = 0; i < prom_trans_ents; i++)
649 prom_trans[i].data |= (tlb_type == hypervisor ?
650 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
653 static void __init hypervisor_tlb_lock(unsigned long vaddr,
657 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
660 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
661 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
666 static unsigned long kern_large_tte(unsigned long paddr);
668 static void __init remap_kernel(void)
670 unsigned long phys_page, tte_vaddr, tte_data;
671 int i, tlb_ent = sparc64_highest_locked_tlbent();
673 tte_vaddr = (unsigned long) KERNBASE;
674 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
675 tte_data = kern_large_tte(phys_page);
677 kern_locked_tte_data = tte_data;
679 /* Now lock us into the TLBs via Hypervisor or OBP. */
680 if (tlb_type == hypervisor) {
681 for (i = 0; i < num_kernel_image_mappings; i++) {
682 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
683 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
684 tte_vaddr += 0x400000;
685 tte_data += 0x400000;
688 for (i = 0; i < num_kernel_image_mappings; i++) {
689 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
690 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
691 tte_vaddr += 0x400000;
692 tte_data += 0x400000;
694 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
696 if (tlb_type == cheetah_plus) {
697 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
698 CTX_CHEETAH_PLUS_NUC);
699 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
700 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
705 static void __init inherit_prom_mappings(void)
707 /* Now fixup OBP's idea about where we really are mapped. */
708 printk("Remapping the kernel... ");
713 void prom_world(int enter)
718 __asm__ __volatile__("flushw");
721 void __flush_dcache_range(unsigned long start, unsigned long end)
725 if (tlb_type == spitfire) {
728 for (va = start; va < end; va += 32) {
729 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
733 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
736 for (va = start; va < end; va += 32)
737 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
741 "i" (ASI_DCACHE_INVALIDATE));
744 EXPORT_SYMBOL(__flush_dcache_range);
746 /* get_new_mmu_context() uses "cache + 1". */
747 DEFINE_SPINLOCK(ctx_alloc_lock);
748 unsigned long tlb_context_cache = CTX_FIRST_VERSION;
749 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
750 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
751 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
752 DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
754 static void mmu_context_wrap(void)
756 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
757 unsigned long new_ver, new_ctx, old_ctx;
758 struct mm_struct *mm;
761 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
763 /* Reserve kernel context */
764 set_bit(0, mmu_context_bmap);
766 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
767 if (unlikely(new_ver == 0))
768 new_ver = CTX_FIRST_VERSION;
769 tlb_context_cache = new_ver;
772 * Make sure that any new mm that is added into per_cpu_secondary_mm
773 * will go through the get_new_mmu_context() path.
778 * Update versions to current on those CPUs that had a valid secondary
781 for_each_online_cpu(cpu) {
783 * If a new mm is stored after we took this mm from the array,
784 * it will go into get_new_mmu_context() path, because we
785 * already bumped the version in tlb_context_cache.
787 mm = per_cpu(per_cpu_secondary_mm, cpu);
789 if (unlikely(!mm || mm == &init_mm))
792 old_ctx = mm->context.sparc64_ctx_val;
793 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
794 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
795 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
796 mm->context.sparc64_ctx_val = new_ctx;
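/* A context value is laid out roughly as: the low CTX_NR_BITS hold the
 * context number and the CTX_VERSION_MASK bits hold the generation.
 * Bumping the version here means every mm whose stored version no longer
 * matches fails CTX_VALID() and is forced through get_new_mmu_context().
 */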
801 /* Caller does TLB context flushing on local CPU if necessary.
802 * The caller also ensures that CTX_VALID(mm->context) is false.
804 * We must be careful about boundary cases so that we never
805 * let the user have CTX 0 (nucleus), nor ever use a CTX
806 * version of zero (otherwise NO_CONTEXT would not be caught
807 * by the version mis-match tests in mmu_context.h).
809 * Always invoked with interrupts disabled.
811 void get_new_mmu_context(struct mm_struct *mm)
813 unsigned long ctx, new_ctx;
814 unsigned long orig_pgsz_bits;
816 spin_lock(&ctx_alloc_lock);
818 /* wrap might have happened, test again if our context became valid */
819 if (unlikely(CTX_VALID(mm->context)))
821 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
822 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
823 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
824 if (new_ctx >= (1 << CTX_NR_BITS)) {
825 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
826 if (new_ctx >= ctx) {
831 if (mm->context.sparc64_ctx_val)
832 cpumask_clear(mm_cpumask(mm));
833 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
834 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
835 tlb_context_cache = new_ctx;
836 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
838 spin_unlock(&ctx_alloc_lock);
841 static int numa_enabled = 1;
842 static int numa_debug;
844 static int __init early_numa(char *p)
849 if (strstr(p, "off"))
852 if (strstr(p, "debug"))
857 early_param("numa", early_numa);
859 #define numadbg(f, a...) \
860 do { if (numa_debug) \
861 printk(KERN_INFO f, ## a); \
864 static void __init find_ramdisk(unsigned long phys_base)
866 #ifdef CONFIG_BLK_DEV_INITRD
867 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
868 unsigned long ramdisk_image;
870 /* Older versions of the bootloader only supported a
871 * 32-bit physical address for the ramdisk image
872 * location, stored at sparc_ramdisk_image. Newer
873 * SILO versions set sparc_ramdisk_image to zero and
874 * provide a full 64-bit physical address at
875 * sparc_ramdisk_image64.
877 ramdisk_image = sparc_ramdisk_image;
879 ramdisk_image = sparc_ramdisk_image64;
881 /* Another bootloader quirk. The bootloader normalizes
882 * the physical address to KERNBASE, so we have to
883 * factor that back out and add in the lowest valid
884 * physical page address to get the true physical address.
886 ramdisk_image -= KERNBASE;
887 ramdisk_image += phys_base;
889 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
890 ramdisk_image, sparc_ramdisk_size);
892 initrd_start = ramdisk_image;
893 initrd_end = ramdisk_image + sparc_ramdisk_size;
895 memblock_reserve(initrd_start, sparc_ramdisk_size);
897 initrd_start += PAGE_OFFSET;
898 initrd_end += PAGE_OFFSET;
903 struct node_mem_mask {
907 static struct node_mem_mask node_masks[MAX_NUMNODES];
908 static int num_node_masks;
910 #ifdef CONFIG_NEED_MULTIPLE_NODES
912 struct mdesc_mlgroup {
919 static struct mdesc_mlgroup *mlgroups;
920 static int num_mlgroups;
922 int numa_cpu_lookup_table[NR_CPUS];
923 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
925 struct mdesc_mblock {
928 u64 offset; /* RA-to-PA */
930 static struct mdesc_mblock *mblocks;
931 static int num_mblocks;
933 static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
935 struct mdesc_mblock *m = NULL;
938 for (i = 0; i < num_mblocks; i++) {
941 if (addr >= m->base &&
942 addr < (m->base + m->size)) {
950 static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
952 int prev_nid, new_nid;
954 prev_nid = NUMA_NO_NODE;
955 for ( ; start < end; start += PAGE_SIZE) {
956 for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
957 struct node_mem_mask *p = &node_masks[new_nid];
959 if ((start & p->mask) == p->match) {
960 if (prev_nid == NUMA_NO_NODE)
966 if (new_nid == num_node_masks) {
968 WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
973 if (prev_nid != new_nid)
978 return start > end ? end : start;
981 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
983 u64 ret_end, pa_start, m_mask, m_match, m_end;
984 struct mdesc_mblock *mblock;
987 if (tlb_type != hypervisor)
988 return memblock_nid_range_sun4u(start, end, nid);
990 mblock = addr_to_mblock(start);
992 WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
1000 pa_start = start + mblock->offset;
1004 for (_nid = 0; _nid < num_node_masks; _nid++) {
1005 struct node_mem_mask *const m = &node_masks[_nid];
1007 if ((pa_start & m->mask) == m->match) {
1014 if (num_node_masks == _nid) {
1015 /* We could not find a NUMA group, so default to 0, but let's
1016 * search for a latency group so we can calculate the correct
1017 * end address to return
1021 for (i = 0; i < num_mlgroups; i++) {
1022 struct mdesc_mlgroup *const m = &mlgroups[i];
1024 if ((pa_start & m->mask) == m->match) {
1031 if (i == num_mlgroups) {
1032 WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
1041 * Each latency group has a match and a mask, and each memory block has an
1042 * offset. An address belongs to a latency group if it satisfies
1043 * the following formula: ((addr + offset) & mask) == match
1044 * It is, however, slow to check every single page against a
1045 * particular latency group. As an optimization we calculate the end value
1046 * using bit arithmetic.
1048 m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
1049 m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
1050 ret_end = m_end > end ? end : m_end;
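/* Sketch of the shortcut above: addresses that satisfy
 * ((addr + offset) & mask) == match come in naturally aligned runs of
 * 2^__ffs(mask) bytes, so m_end is (in effect) the real address just past
 * the run containing pa_start, and the whole run can be attributed to the
 * node in one step instead of testing page by page.
 */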
1058 /* This must be invoked after performing all of the necessary
1059 * memblock_set_node() calls for 'nid'. We need to be able to get
1060 * correct data from get_pfn_range_for_nid().
1062 static void __init allocate_node_data(int nid)
1064 struct pglist_data *p;
1065 unsigned long start_pfn, end_pfn;
1066 #ifdef CONFIG_NEED_MULTIPLE_NODES
1068 NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
1069 SMP_CACHE_BYTES, nid);
1070 if (!NODE_DATA(nid)) {
1071 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
1075 NODE_DATA(nid)->node_id = nid;
1080 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1081 p->node_start_pfn = start_pfn;
1082 p->node_spanned_pages = end_pfn - start_pfn;
1085 static void init_node_masks_nonnuma(void)
1087 #ifdef CONFIG_NEED_MULTIPLE_NODES
1091 numadbg("Initializing tables for non-numa.\n");
1093 node_masks[0].mask = 0;
1094 node_masks[0].match = 0;
1097 #ifdef CONFIG_NEED_MULTIPLE_NODES
1098 for (i = 0; i < NR_CPUS; i++)
1099 numa_cpu_lookup_table[i] = 0;
1101 cpumask_setall(&numa_cpumask_lookup_table[0]);
1105 #ifdef CONFIG_NEED_MULTIPLE_NODES
1106 struct pglist_data *node_data[MAX_NUMNODES];
1108 EXPORT_SYMBOL(numa_cpu_lookup_table);
1109 EXPORT_SYMBOL(numa_cpumask_lookup_table);
1110 EXPORT_SYMBOL(node_data);
1112 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
1117 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
1118 u64 target = mdesc_arc_target(md, arc);
1121 val = mdesc_get_property(md, target,
1122 "cfg-handle", NULL);
1123 if (val && *val == cfg_handle)
1129 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
1132 u64 arc, candidate, best_latency = ~(u64)0;
1134 candidate = MDESC_NODE_NULL;
1135 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1136 u64 target = mdesc_arc_target(md, arc);
1137 const char *name = mdesc_node_name(md, target);
1140 if (strcmp(name, "pio-latency-group"))
1143 val = mdesc_get_property(md, target, "latency", NULL);
1147 if (*val < best_latency) {
1149 best_latency = *val;
1153 if (candidate == MDESC_NODE_NULL)
1156 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
1159 int of_node_to_nid(struct device_node *dp)
1161 const struct linux_prom64_registers *regs;
1162 struct mdesc_handle *md;
1167 /* This is the right thing to do on currently supported
1168 * SUN4U NUMA platforms as well, as the PCI controller does
1169 * not sit behind any particular memory controller.
1174 regs = of_get_property(dp, "reg", NULL);
1178 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1184 mdesc_for_each_node_by_name(md, grp, "group") {
1185 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1197 static void __init add_node_ranges(void)
1199 struct memblock_region *reg;
1200 unsigned long prev_max;
1203 prev_max = memblock.memory.max;
1205 for_each_memblock(memory, reg) {
1206 unsigned long size = reg->size;
1207 unsigned long start, end;
1211 while (start < end) {
1212 unsigned long this_end;
1215 this_end = memblock_nid_range(start, end, &nid);
1217 numadbg("Setting memblock NUMA node nid[%d] "
1218 "start[%lx] end[%lx]\n",
1219 nid, start, this_end);
1221 memblock_set_node(start, this_end - start,
1222 &memblock.memory, nid);
1223 if (memblock.memory.max != prev_max)
1224 goto memblock_resized;
1230 static int __init grab_mlgroups(struct mdesc_handle *md)
1232 unsigned long paddr;
1236 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1241 paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
1246 mlgroups = __va(paddr);
1247 num_mlgroups = count;
1250 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1251 struct mdesc_mlgroup *m = &mlgroups[count++];
1256 val = mdesc_get_property(md, node, "latency", NULL);
1258 val = mdesc_get_property(md, node, "address-match", NULL);
1260 val = mdesc_get_property(md, node, "address-mask", NULL);
1263 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1264 "match[%llx] mask[%llx]\n",
1265 count - 1, m->node, m->latency, m->match, m->mask);
1271 static int __init grab_mblocks(struct mdesc_handle *md)
1273 unsigned long paddr;
1277 mdesc_for_each_node_by_name(md, node, "mblock")
1282 paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
1287 mblocks = __va(paddr);
1288 num_mblocks = count;
1291 mdesc_for_each_node_by_name(md, node, "mblock") {
1292 struct mdesc_mblock *m = &mblocks[count++];
1295 val = mdesc_get_property(md, node, "base", NULL);
1297 val = mdesc_get_property(md, node, "size", NULL);
1299 val = mdesc_get_property(md, node,
1300 "address-congruence-offset", NULL);
1302 /* The address-congruence-offset property is optional.
1303 * Explicitly zero it to identify this case.
1310 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1311 count - 1, m->base, m->size, m->offset);
1317 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1318 u64 grp, cpumask_t *mask)
1322 cpumask_clear(mask);
1324 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1325 u64 target = mdesc_arc_target(md, arc);
1326 const char *name = mdesc_node_name(md, target);
1329 if (strcmp(name, "cpu"))
1331 id = mdesc_get_property(md, target, "id", NULL);
1332 if (*id < nr_cpu_ids)
1333 cpumask_set_cpu(*id, mask);
1337 static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1341 for (i = 0; i < num_mlgroups; i++) {
1342 struct mdesc_mlgroup *m = &mlgroups[i];
1343 if (m->node == node)
1349 int __node_distance(int from, int to)
1351 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1352 pr_warn("Returning default NUMA distance value for %d->%d\n",
1354 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1356 return numa_latency[from][to];
1358 EXPORT_SYMBOL(__node_distance);
1360 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1364 for (i = 0; i < MAX_NUMNODES; i++) {
1365 struct node_mem_mask *n = &node_masks[i];
1367 if ((grp->mask == n->mask) && (grp->match == n->match))
1373 static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1378 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1380 u64 target = mdesc_arc_target(md, arc);
1381 struct mdesc_mlgroup *m = find_mlgroup(target);
1385 tnode = find_best_numa_node_for_mlgroup(m);
1386 if (tnode == MAX_NUMNODES)
1388 numa_latency[index][tnode] = m->latency;
1392 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1395 struct mdesc_mlgroup *candidate = NULL;
1396 u64 arc, best_latency = ~(u64)0;
1397 struct node_mem_mask *n;
1399 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1400 u64 target = mdesc_arc_target(md, arc);
1401 struct mdesc_mlgroup *m = find_mlgroup(target);
1404 if (m->latency < best_latency) {
1406 best_latency = m->latency;
1412 if (num_node_masks != index) {
1413 printk(KERN_ERR "Inconsistent NUMA state, "
1414 "index[%d] != num_node_masks[%d]\n",
1415 index, num_node_masks);
1419 n = &node_masks[num_node_masks++];
1421 n->mask = candidate->mask;
1422 n->match = candidate->match;
1424 numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1425 index, n->mask, n->match, candidate->latency);
1430 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1436 numa_parse_mdesc_group_cpus(md, grp, &mask);
1438 for_each_cpu(cpu, &mask)
1439 numa_cpu_lookup_table[cpu] = index;
1440 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1443 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1444 for_each_cpu(cpu, &mask)
1449 return numa_attach_mlgroup(md, grp, index);
1452 static int __init numa_parse_mdesc(void)
1454 struct mdesc_handle *md = mdesc_grab();
1455 int i, j, err, count;
1458 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1459 if (node == MDESC_NODE_NULL) {
1464 err = grab_mblocks(md);
1468 err = grab_mlgroups(md);
1473 mdesc_for_each_node_by_name(md, node, "group") {
1474 err = numa_parse_mdesc_group(md, node, count);
1481 mdesc_for_each_node_by_name(md, node, "group") {
1482 find_numa_latencies_for_group(md, node, count);
1486 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1487 for (i = 0; i < MAX_NUMNODES; i++) {
1488 u64 self_latency = numa_latency[i][i];
1490 for (j = 0; j < MAX_NUMNODES; j++) {
1491 numa_latency[i][j] =
1492 (numa_latency[i][j] * LOCAL_DISTANCE) /
1499 for (i = 0; i < num_node_masks; i++) {
1500 allocate_node_data(i);
1510 static int __init numa_parse_jbus(void)
1512 unsigned long cpu, index;
1514 /* NUMA node id is encoded in bits 36 and higher, and there is
1515 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1518 for_each_present_cpu(cpu) {
1519 numa_cpu_lookup_table[cpu] = index;
1520 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1521 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1522 node_masks[index].match = cpu << 36UL;
1526 num_node_masks = index;
1530 for (index = 0; index < num_node_masks; index++) {
1531 allocate_node_data(index);
1532 node_set_online(index);
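/* For example, on such a JBUS system CPU 1 yields match == 1UL << 36, so
 * physical addresses in [0x1000000000, 0x2000000000) are attributed to
 * NUMA node 1.
 */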
1538 static int __init numa_parse_sun4u(void)
1540 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1543 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1544 if ((ver >> 32UL) == __JALAPENO_ID ||
1545 (ver >> 32UL) == __SERRANO_ID)
1546 return numa_parse_jbus();
1551 static int __init bootmem_init_numa(void)
1556 numadbg("bootmem_init_numa()\n");
1558 /* Some sane defaults for numa latency values */
1559 for (i = 0; i < MAX_NUMNODES; i++) {
1560 for (j = 0; j < MAX_NUMNODES; j++)
1561 numa_latency[i][j] = (i == j) ?
1562 LOCAL_DISTANCE : REMOTE_DISTANCE;
1566 if (tlb_type == hypervisor)
1567 err = numa_parse_mdesc();
1569 err = numa_parse_sun4u();
1576 static int bootmem_init_numa(void)
1583 static void __init bootmem_init_nonnuma(void)
1585 unsigned long top_of_ram = memblock_end_of_DRAM();
1586 unsigned long total_ram = memblock_phys_mem_size();
1588 numadbg("bootmem_init_nonnuma()\n");
1590 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1591 top_of_ram, total_ram);
1592 printk(KERN_INFO "Memory hole size: %ldMB\n",
1593 (top_of_ram - total_ram) >> 20);
1595 init_node_masks_nonnuma();
1596 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
1597 allocate_node_data(0);
1601 static unsigned long __init bootmem_init(unsigned long phys_base)
1603 unsigned long end_pfn;
1605 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1606 max_pfn = max_low_pfn = end_pfn;
1607 min_low_pfn = (phys_base >> PAGE_SHIFT);
1609 if (bootmem_init_numa() < 0)
1610 bootmem_init_nonnuma();
1612 /* Dump memblock with node info. */
1613 memblock_dump_all();
1615 /* XXX cpu notifier XXX */
1617 sparse_memory_present_with_active_regions(MAX_NUMNODES);
1623 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1624 static int pall_ents __initdata;
1626 static unsigned long max_phys_bits = 40;
1628 bool kern_addr_valid(unsigned long addr)
1636 if ((long)addr < 0L) {
1637 unsigned long pa = __pa(addr);
1639 if ((pa >> max_phys_bits) != 0UL)
1642 return pfn_valid(pa >> PAGE_SHIFT);
1645 if (addr >= (unsigned long) KERNBASE &&
1646 addr < (unsigned long)&_end)
1649 pgd = pgd_offset_k(addr);
1653 p4d = p4d_offset(pgd, addr);
1657 pud = pud_offset(p4d, addr);
1661 if (pud_large(*pud))
1662 return pfn_valid(pud_pfn(*pud));
1664 pmd = pmd_offset(pud, addr);
1668 if (pmd_large(*pmd))
1669 return pfn_valid(pmd_pfn(*pmd));
1671 pte = pte_offset_kernel(pmd, addr);
1675 return pfn_valid(pte_pfn(*pte));
1677 EXPORT_SYMBOL(kern_addr_valid);
1679 static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1683 const unsigned long mask16gb = (1UL << 34) - 1UL;
1684 u64 pte_val = vstart;
1686 /* Each PUD is 8GB */
1687 if ((vstart & mask16gb) ||
1688 (vend - vstart <= mask16gb)) {
1689 pte_val ^= kern_linear_pte_xor[2];
1690 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1692 return vstart + PUD_SIZE;
1695 pte_val ^= kern_linear_pte_xor[3];
1696 pte_val |= _PAGE_PUD_HUGE;
1698 vend = vstart + mask16gb + 1UL;
1699 while (vstart < vend) {
1700 pud_val(*pud) = pte_val;
1702 pte_val += PUD_SIZE;
1709 static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1712 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1718 static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1722 const unsigned long mask256mb = (1UL << 28) - 1UL;
1723 const unsigned long mask2gb = (1UL << 31) - 1UL;
1724 u64 pte_val = vstart;
1726 /* Each PMD is 8MB */
1727 if ((vstart & mask256mb) ||
1728 (vend - vstart <= mask256mb)) {
1729 pte_val ^= kern_linear_pte_xor[0];
1730 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1732 return vstart + PMD_SIZE;
1735 if ((vstart & mask2gb) ||
1736 (vend - vstart <= mask2gb)) {
1737 pte_val ^= kern_linear_pte_xor[1];
1738 pte_val |= _PAGE_PMD_HUGE;
1739 vend = vstart + mask256mb + 1UL;
1741 pte_val ^= kern_linear_pte_xor[2];
1742 pte_val |= _PAGE_PMD_HUGE;
1743 vend = vstart + mask2gb + 1UL;
1746 while (vstart < vend) {
1747 pmd_val(*pmd) = pte_val;
1749 pte_val += PMD_SIZE;
1757 static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1760 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1766 static unsigned long __ref kernel_map_range(unsigned long pstart,
1767 unsigned long pend, pgprot_t prot,
1770 unsigned long vstart = PAGE_OFFSET + pstart;
1771 unsigned long vend = PAGE_OFFSET + pend;
1772 unsigned long alloc_bytes = 0UL;
1774 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1775 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1780 while (vstart < vend) {
1781 unsigned long this_end, paddr = __pa(vstart);
1782 pgd_t *pgd = pgd_offset_k(vstart);
1788 if (pgd_none(*pgd)) {
1791 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1795 alloc_bytes += PAGE_SIZE;
1796 pgd_populate(&init_mm, pgd, new);
1799 p4d = p4d_offset(pgd, vstart);
1800 if (p4d_none(*p4d)) {
1803 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1807 alloc_bytes += PAGE_SIZE;
1808 p4d_populate(&init_mm, p4d, new);
1811 pud = pud_offset(p4d, vstart);
1812 if (pud_none(*pud)) {
1815 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1816 vstart = kernel_map_hugepud(vstart, vend, pud);
1819 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1823 alloc_bytes += PAGE_SIZE;
1824 pud_populate(&init_mm, pud, new);
1827 pmd = pmd_offset(pud, vstart);
1828 if (pmd_none(*pmd)) {
1831 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1832 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1835 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1839 alloc_bytes += PAGE_SIZE;
1840 pmd_populate_kernel(&init_mm, pmd, new);
1843 pte = pte_offset_kernel(pmd, vstart);
1844 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1845 if (this_end > vend)
1848 while (vstart < this_end) {
1849 pte_val(*pte) = (paddr | pgprot_val(prot));
1851 vstart += PAGE_SIZE;
1860 panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
1861 __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1865 static void __init flush_all_kernel_tsbs(void)
1869 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1870 struct tsb *ent = &swapper_tsb[i];
1872 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1874 #ifndef CONFIG_DEBUG_PAGEALLOC
1875 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1876 struct tsb *ent = &swapper_4m_tsb[i];
1878 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1883 extern unsigned int kvmap_linear_patch[1];
1885 static void __init kernel_physical_mapping_init(void)
1887 unsigned long i, mem_alloced = 0UL;
1888 bool use_huge = true;
1890 #ifdef CONFIG_DEBUG_PAGEALLOC
1893 for (i = 0; i < pall_ents; i++) {
1894 unsigned long phys_start, phys_end;
1896 phys_start = pall[i].phys_addr;
1897 phys_end = phys_start + pall[i].reg_size;
1899 mem_alloced += kernel_map_range(phys_start, phys_end,
1900 PAGE_KERNEL, use_huge);
1903 printk("Allocated %ld bytes for kernel page tables.\n",
1906 kvmap_linear_patch[0] = 0x01000000; /* nop */
1907 flushi(&kvmap_linear_patch[0]);
1909 flush_all_kernel_tsbs();
1914 #ifdef CONFIG_DEBUG_PAGEALLOC
1915 void __kernel_map_pages(struct page *page, int numpages, int enable)
1917 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1918 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1920 kernel_map_range(phys_start, phys_end,
1921 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1923 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1924 PAGE_OFFSET + phys_end);
1926 /* We should perform an IPI and flush all tlbs,
1927 * but that can deadlock, so we flush only the current cpu.
1929 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1930 PAGE_OFFSET + phys_end);
1934 unsigned long __init find_ecache_flush_span(unsigned long size)
1938 for (i = 0; i < pavail_ents; i++) {
1939 if (pavail[i].reg_size >= size)
1940 return pavail[i].phys_addr;
1946 unsigned long PAGE_OFFSET;
1947 EXPORT_SYMBOL(PAGE_OFFSET);
1949 unsigned long VMALLOC_END = 0x0000010000000000UL;
1950 EXPORT_SYMBOL(VMALLOC_END);
1952 unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1953 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1955 static void __init setup_page_offset(void)
1957 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1958 /* Cheetah/Panther support a full 64-bit virtual
1959 * address, so we can use all that our page tables
1962 sparc64_va_hole_top = 0xfff0000000000000UL;
1963 sparc64_va_hole_bottom = 0x0010000000000000UL;
1966 } else if (tlb_type == hypervisor) {
1967 switch (sun4v_chip_type) {
1968 case SUN4V_CHIP_NIAGARA1:
1969 case SUN4V_CHIP_NIAGARA2:
1970 /* T1 and T2 support 48-bit virtual addresses. */
1971 sparc64_va_hole_top = 0xffff800000000000UL;
1972 sparc64_va_hole_bottom = 0x0000800000000000UL;
1976 case SUN4V_CHIP_NIAGARA3:
1977 /* T3 supports 48-bit virtual addresses. */
1978 sparc64_va_hole_top = 0xffff800000000000UL;
1979 sparc64_va_hole_bottom = 0x0000800000000000UL;
1983 case SUN4V_CHIP_NIAGARA4:
1984 case SUN4V_CHIP_NIAGARA5:
1985 case SUN4V_CHIP_SPARC64X:
1986 case SUN4V_CHIP_SPARC_M6:
1987 /* T4 and later support 52-bit virtual addresses. */
1988 sparc64_va_hole_top = 0xfff8000000000000UL;
1989 sparc64_va_hole_bottom = 0x0008000000000000UL;
1992 case SUN4V_CHIP_SPARC_M7:
1993 case SUN4V_CHIP_SPARC_SN:
1994 /* M7 and later support 52-bit virtual addresses. */
1995 sparc64_va_hole_top = 0xfff8000000000000UL;
1996 sparc64_va_hole_bottom = 0x0008000000000000UL;
1999 case SUN4V_CHIP_SPARC_M8:
2001 /* M8 and later support 54-bit virtual addresses.
2002 * However, we restrict M8 and above to 53 VA bits,
2003 * as a 4-level page table cannot support more than
2006 sparc64_va_hole_top = 0xfff0000000000000UL;
2007 sparc64_va_hole_bottom = 0x0010000000000000UL;
2013 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
2014 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
2019 PAGE_OFFSET = sparc64_va_hole_top;
2020 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
2021 (sparc64_va_hole_bottom >> 2));
2023 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
2024 PAGE_OFFSET, max_phys_bits);
2025 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
2026 VMALLOC_START, VMALLOC_END);
2027 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
2028 VMEMMAP_BASE, VMEMMAP_BASE << 1);
2031 static void __init tsb_phys_patch(void)
2033 struct tsb_ldquad_phys_patch_entry *pquad;
2034 struct tsb_phys_patch_entry *p;
2036 pquad = &__tsb_ldquad_phys_patch;
2037 while (pquad < &__tsb_ldquad_phys_patch_end) {
2038 unsigned long addr = pquad->addr;
2040 if (tlb_type == hypervisor)
2041 *(unsigned int *) addr = pquad->sun4v_insn;
2043 *(unsigned int *) addr = pquad->sun4u_insn;
2045 __asm__ __volatile__("flush %0"
2052 p = &__tsb_phys_patch;
2053 while (p < &__tsb_phys_patch_end) {
2054 unsigned long addr = p->addr;
2056 *(unsigned int *) addr = p->insn;
2058 __asm__ __volatile__("flush %0"
2066 /* Don't mark as init, we give this to the Hypervisor. */
2067 #ifndef CONFIG_DEBUG_PAGEALLOC
2068 #define NUM_KTSB_DESCR 2
2070 #define NUM_KTSB_DESCR 1
2072 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
2074 /* The swapper TSBs are loaded with a base sequence of:
2076 * sethi %uhi(SYMBOL), REG1
2077 * sethi %hi(SYMBOL), REG2
2078 * or REG1, %ulo(SYMBOL), REG1
2079 * or REG2, %lo(SYMBOL), REG2
2080 * sllx REG1, 32, REG1
2081 * or REG1, REG2, REG1
2083 * When we use physical addressing for the TSB accesses, we patch the
2084 * first four instructions in the above sequence.
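/* A sketch of the patch, following the sequence above: for a TSB physical
 * address 'pa', high = pa >> 32 and low = pa & 0xffffffff; the two sethi
 * instructions receive the 22-bit immediates high >> 10 and low >> 10, and
 * the two or instructions receive the remaining 10 bits, high & 0x3ff and
 * low & 0x3ff, which is what patch_one_ktsb_phys() below does.
 */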
2087 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2089 unsigned long high_bits, low_bits;
2091 high_bits = (pa >> 32) & 0xffffffff;
2092 low_bits = (pa >> 0) & 0xffffffff;
2094 while (start < end) {
2095 unsigned int *ia = (unsigned int *)(unsigned long)*start;
2097 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
2098 __asm__ __volatile__("flush %0" : : "r" (ia));
2100 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
2101 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
2103 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
2104 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
2106 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2107 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
2113 static void ktsb_phys_patch(void)
2115 extern unsigned int __swapper_tsb_phys_patch;
2116 extern unsigned int __swapper_tsb_phys_patch_end;
2117 unsigned long ktsb_pa;
2119 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2120 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2121 &__swapper_tsb_phys_patch_end, ktsb_pa);
2122 #ifndef CONFIG_DEBUG_PAGEALLOC
2124 extern unsigned int __swapper_4m_tsb_phys_patch;
2125 extern unsigned int __swapper_4m_tsb_phys_patch_end;
2126 ktsb_pa = (kern_base +
2127 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2128 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2129 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
2134 static void __init sun4v_ktsb_init(void)
2136 unsigned long ktsb_pa;
2138 /* First KTSB for PAGE_SIZE mappings. */
2139 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2141 switch (PAGE_SIZE) {
2144 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2145 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2149 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2150 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2154 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2155 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2158 case 4 * 1024 * 1024:
2159 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2160 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2164 ktsb_descr[0].assoc = 1;
2165 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2166 ktsb_descr[0].ctx_idx = 0;
2167 ktsb_descr[0].tsb_base = ktsb_pa;
2168 ktsb_descr[0].resv = 0;
2170 #ifndef CONFIG_DEBUG_PAGEALLOC
2171 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
2172 ktsb_pa = (kern_base +
2173 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2175 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
2176 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2177 HV_PGSZ_MASK_256MB |
2179 HV_PGSZ_MASK_16GB) &
2181 ktsb_descr[1].assoc = 1;
2182 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2183 ktsb_descr[1].ctx_idx = 0;
2184 ktsb_descr[1].tsb_base = ktsb_pa;
2185 ktsb_descr[1].resv = 0;
2189 void sun4v_ktsb_register(void)
2191 unsigned long pa, ret;
2193 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2195 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2197 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2198 "errors with %lx\n", pa, ret);
2203 static void __init sun4u_linear_pte_xor_finalize(void)
2205 #ifndef CONFIG_DEBUG_PAGEALLOC
2206 /* This is where we would add Panther support for
2207 * 32MB and 256MB pages.
2212 static void __init sun4v_linear_pte_xor_finalize(void)
2214 unsigned long pagecv_flag;
2216 /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
2217 * enables MCD error. Do not set bit 9 on M7 processor.
2219 switch (sun4v_chip_type) {
2220 case SUN4V_CHIP_SPARC_M7:
2221 case SUN4V_CHIP_SPARC_M8:
2222 case SUN4V_CHIP_SPARC_SN:
2226 pagecv_flag = _PAGE_CV_4V;
2229 #ifndef CONFIG_DEBUG_PAGEALLOC
2230 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2231 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2233 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2234 _PAGE_P_4V | _PAGE_W_4V);
2236 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2239 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2240 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2242 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2243 _PAGE_P_4V | _PAGE_W_4V);
2245 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2248 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2249 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2251 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2252 _PAGE_P_4V | _PAGE_W_4V);
2254 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2259 /* paging_init() sets up the page tables */
2261 static unsigned long last_valid_pfn;
2263 static void sun4u_pgprot_init(void);
2264 static void sun4v_pgprot_init(void);
2266 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2267 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2268 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2269 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2270 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2271 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2273 /* We need to exclude reserved regions. This exclusion will include
2274 * vmlinux and initrd. To be more precise, the initrd size could be used to
2275 * compute a new lower limit, because it is freed later during initialization.
2277 static void __init reduce_memory(phys_addr_t limit_ram)
2279 limit_ram += memblock_reserved_size();
2280 memblock_enforce_memory_limit(limit_ram);
2283 void __init paging_init(void)
2285 unsigned long end_pfn, shift, phys_base;
2286 unsigned long real_end, i;
2288 setup_page_offset();
2290 /* These build-time checks make sure that the dcache_dirty_cpu()
2291 * page->flags usage will work.
2293 * When a page gets marked as dcache-dirty, we store the
2294 * cpu number starting at bit 32 in the page->flags. Also,
2295 * functions like clear_dcache_dirty_cpu use the cpu mask
2296 * in 13-bit signed-immediate instruction fields.
2300 * Page flags must not reach into upper 32 bits that are used
2301 * for the cpu number
2303 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2306 * The bit fields placed in the high range must not reach below
2307 * the 32 bit boundary. Otherwise we cannot place the cpu field
2308 * at the 32 bit boundary.
2310 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2311 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2313 BUILD_BUG_ON(NR_CPUS > 4096);
2315 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2316 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2318 /* Invalidate both kernel TSBs. */
2319 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2320 #ifndef CONFIG_DEBUG_PAGEALLOC
2321 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2324 /* The TTE.cv bit on sparc v9 occupies the same position as the TTE.mcde
2325 * bit on the M7 processor. This is a conflicting usage of the same
2326 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2327 * Detection errors on all pages, and this will lead to problems
2328 * later. The kernel does not run with MCD enabled, and hence the rest
2329 * of the required steps to fully configure memory corruption
2330 * detection are not taken. We need to ensure TTE.mcde is not
2331 * set on the M7 processor. Compute the value of the cacheability
2332 * flag for later use, taking this into consideration.
2334 switch (sun4v_chip_type) {
2335 case SUN4V_CHIP_SPARC_M7:
2336 case SUN4V_CHIP_SPARC_M8:
2337 case SUN4V_CHIP_SPARC_SN:
2338 page_cache4v_flag = _PAGE_CP_4V;
2341 page_cache4v_flag = _PAGE_CACHE_4V;
2345 if (tlb_type == hypervisor)
2346 sun4v_pgprot_init();
2348 sun4u_pgprot_init();
2350 if (tlb_type == cheetah_plus ||
2351 tlb_type == hypervisor) {
2356 if (tlb_type == hypervisor)
2357 sun4v_patch_tlb_handlers();
2359 /* Find available physical memory...
2361 * Read it twice in order to work around a bug in openfirmware.
2362 * The call to grab this table itself can cause openfirmware to
2363 * allocate memory, which in turn can take away some space from
2364 * the list of available memory. Reading it twice makes sure
2365 * we really do get the final value.
2367 read_obp_translations();
2368 read_obp_memory("reg", &pall[0], &pall_ents);
2369 read_obp_memory("available", &pavail[0], &pavail_ents);
2370 read_obp_memory("available", &pavail[0], &pavail_ents);
2372 phys_base = 0xffffffffffffffffUL;
2373 for (i = 0; i < pavail_ents; i++) {
2374 phys_base = min(phys_base, pavail[i].phys_addr);
2375 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2378 memblock_reserve(kern_base, kern_size);
2380 find_ramdisk(phys_base);
2382 if (cmdline_memory_size)
2383 reduce_memory(cmdline_memory_size);
2385 memblock_allow_resize();
2386 memblock_dump_all();
2388 set_bit(0, mmu_context_bmap);
2390 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2392 real_end = (unsigned long)_end;
2393 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2394 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2395 num_kernel_image_mappings);
2397 /* Set kernel pgd to upper alias so physical page computations
2400 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2402 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2404 inherit_prom_mappings();
2406 /* Ok, we can use our TLB miss and window trap handlers safely. */
2411 prom_build_devicetree();
2412 of_populate_present_mask();
2414 of_fill_in_cpu_data();
2417 if (tlb_type == hypervisor) {
2419 mdesc_populate_present_mask(cpu_all_mask);
2421 mdesc_fill_in_cpu_data(cpu_all_mask);
2423 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2425 sun4v_linear_pte_xor_finalize();
2428 sun4v_ktsb_register();
2430 unsigned long impl, ver;
2432 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2433 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2435 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2436 impl = ((ver >> 32) & 0xffff);
2437 if (impl == PANTHER_IMPL)
2438 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2439 HV_PGSZ_MASK_256MB);
2441 sun4u_linear_pte_xor_finalize();
2444 /* Flush the TLBs and the 4M TSB so that the updated linear
2445 * pte XOR settings are realized for all mappings.
2448 #ifndef CONFIG_DEBUG_PAGEALLOC
2449 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2453 /* Setup bootmem... */
2454 last_valid_pfn = end_pfn = bootmem_init(phys_base);
2456 kernel_physical_mapping_init();
2459 unsigned long max_zone_pfns[MAX_NR_ZONES];
2461 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2463 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2465 free_area_init(max_zone_pfns);
2468 printk("Booting Linux...\n");
2471 int page_in_phys_avail(unsigned long paddr)
2477 for (i = 0; i < pavail_ents; i++) {
2478 unsigned long start, end;
2480 start = pavail[i].phys_addr;
2481 end = start + pavail[i].reg_size;
2483 if (paddr >= start && paddr < end)
2486 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2488 #ifdef CONFIG_BLK_DEV_INITRD
2489 if (paddr >= __pa(initrd_start) &&
2490 paddr < __pa(PAGE_ALIGN(initrd_end)))
2497 static void __init register_page_bootmem_info(void)
2499 #ifdef CONFIG_NEED_MULTIPLE_NODES
2502 for_each_online_node(i)
2503 if (NODE_DATA(i)->node_spanned_pages)
2504 register_page_bootmem_info_node(NODE_DATA(i));
2507 void __init mem_init(void)
2509 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2511 memblock_free_all();
2514 * Must be done after boot memory is put on freelist, because here we
2515 * might set fields in deferred struct pages that have not yet been
2516 * initialized, and memblock_free_all() initializes all the reserved
2517 * deferred pages for us.
2519 register_page_bootmem_info();
2522 * Set up the zero page and mark it reserved, so that the page count
2523 * is not manipulated when freeing the page from user ptes.
2525 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2526 if (mem_map_zero == NULL) {
2527 prom_printf("paging_init: Cannot alloc zero page.\n");
2530 mark_page_reserved(mem_map_zero);
2532 mem_init_print_info(NULL);
2534 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2535 cheetah_ecache_flush_init();
void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free)
			free_reserved_page(virt_to_page(page));
	}
}
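
/* The following page protection values and software PTE bit masks are
 * resolved at boot time for the running MMU type (sun4u vs. sun4v) by
 * sun4u_pgprot_init() and sun4v_pgprot_init() below.
 */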
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
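
/* The virtual memmap is backed by PMD-sized (4MB) huge mappings: each
 * covered range gets one PMD entry pointing at a physically contiguous
 * block obtained from vmemmap_alloc_block().
 */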
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
			       int node, struct vmem_altmap *altmap)
{
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);

	pte_base |= _PAGE_PMD_HUGE;

	vstart = vstart & PMD_MASK;
	vend = ALIGN(vend, PMD_SIZE);
	for (; vstart < vend; vstart += PMD_SIZE) {
		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
		unsigned long pte;
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, vstart, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, vstart, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, vstart);
		pte = pmd_val(*pmd);
		if (!(pte & _PAGE_VALID)) {
			void *block = vmemmap_alloc_block(PMD_SIZE, node);

			if (!block)
				return -ENOMEM;

			pmd_val(*pmd) = pte_base | __pa(block);
		}
	}

	return 0;
}

void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
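
/* protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of vm_flags.  Writable private mappings (the "copy" slots) get
 * copy-on-write protections, only shared mappings are made writable, and
 * the execute bit is masked off whenever VM_EXEC is not set.
 */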
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;
	int i;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				page_cache4v_flag | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = page_cache4v_flag;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		PAGE_OFFSET;
#endif
	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
				   _PAGE_W_4V);

	for (i = 1; i < 4; i++)
		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
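
/* Translate a page size in bytes into the TTE size-field encoding for the
 * running MMU type.  Sizes that are not recognized fall back to the base
 * 8K encoding.
 */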
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
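
/* Build the template TTE used for the kernel's 4MB linear mappings,
 * OR'd with the physical address by the caller.  The sun4u version
 * includes the lock bit (_PAGE_L_4U); the sun4v TTE format has no lock
 * bit.
 */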
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       page_cache4v_flag | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
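
/* Note that the whole flush below runs with PSTATE.IE cleared (interrupts
 * disabled), and that on spitfire the D-TLB and I-TLB entries are zapped
 * one at a time, skipping entries that have the lock bit (_PAGE_L_4U) set.
 */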
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "flush %%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (pstate));
}
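
/* PTE tables are single pages obtained from the page allocator.  The
 * user variant, pte_alloc_one(), additionally runs the page-table page
 * constructor for accounting and the split page-table lock; the matching
 * destructor is invoked from __pte_free().
 */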
pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	pte_t *pte = NULL;

	if (page)
		pte = (pte_t *) page_address(page);

	return pte;
}

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {
		free_unref_page(page);
		return NULL;
	}
	return (pte_t *) page_address(page);
}

void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static void __pte_free(pgtable_t pte)
{
	struct page *page = virt_to_page(pte);

	pgtable_pte_page_dtor(page);
	__free_page(page);
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	__pte_free(pte);
}

void pgtable_free(void *table, bool is_page)
{
	if (is_page)
		__pte_free(table);
	else
		kmem_cache_free(pgtable_cache, table);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	unsigned long pte, flags;
	struct mm_struct *mm;
	pmd_t entry = *pmd;

	if (!pmd_large(entry) || !pmd_young(entry))
		return;

	pte = pmd_val(entry);

	/* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
	if (!(pte & _PAGE_VALID))
		return;

	/* We are fabricating 8MB pages using 4MB real hw pages.  */
	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
					addr, pte);

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
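
/* hugetlb_setup() is invoked the first time a task needs huge pages: it
 * grows the MM_TSB_HUGE TSB if it does not exist yet and, on
 * cheetah_plus, rewrites the context register page-size fields so that
 * the second half of the D-TLB can hold huge page translations.
 */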
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_setup(struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct tsb_config *tp;

	if (faulthandler_disabled() || !mm) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
		die_if_kernel("HugeTSB in atomic", regs);
	}

	tp = &mm->context.tsb_block[MM_TSB_HUGE];
	if (likely(tp->tsb == NULL))
		tsb_grow(mm, MM_TSB_HUGE, 0);

	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		bool need_context_reload = false;
		unsigned long ctx;

		spin_lock_irq(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			need_context_reload = true;
		}
		spin_unlock_irq(&ctx_alloc_lock);

		if (need_context_reload)
			on_each_cpu(context_reload, mm, 0);
	}
}
#endif
static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static inline resource_size_t compute_kern_paddr(void *addr)
{
	return (resource_size_t) (addr - KERNBASE + kern_base);
}

static void __init kernel_lds_init(void)
{
	code_resource.start = compute_kern_paddr(_text);
	code_resource.end   = compute_kern_paddr(_etext - 1);
	data_resource.start = compute_kern_paddr(_etext);
	data_resource.end   = compute_kern_paddr(_edata - 1);
	bss_resource.start  = compute_kern_paddr(__bss_start);
	bss_resource.end    = compute_kern_paddr(_end - 1);
}
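
/* Register each firmware-provided memory range as "System RAM" in the
 * iomem resource tree (visible via /proc/iomem) and nest the kernel
 * code, data and bss resources inside it.
 */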
static int __init report_memory(void)
{
	int i;
	struct resource *res;

	kernel_lds_init();

	for (i = 0; i < pavail_ents; i++) {
		res = kzalloc(sizeof(struct resource), GFP_KERNEL);

		if (!res) {
			pr_warn("Failed to allocate resource.\n");
			break;
		}

		res->name = "System RAM";
		res->start = pavail[i].phys_addr;
		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		if (insert_resource(&iomem_resource, res) < 0) {
			pr_warn("Resource insertion failed.\n");
			break;
		}

		insert_resource(res, &code_resource);
		insert_resource(res, &data_resource);
		insert_resource(res, &bss_resource);
	}

	return 0;
}
arch_initcall(report_memory);
#ifdef CONFIG_SMP
#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
#else
#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
#endif
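
/* Kernel TLB range flushes must leave the OpenBoot PROM's mappings in
 * the [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) hole untouched, so a range that
 * straddles the hole is split and only the parts outside it are flushed.
 */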
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
		if (start < LOW_OBP_ADDRESS) {
			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
		}
		if (end > HI_OBP_ADDRESS) {
			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
		}
	} else {
		flush_tsb_kernel_range(start, end);
		do_flush_tlb_kernel_range(start, end);
	}
}
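
/* sparc64 provides its own page copy helpers so that, on ADI
 * (Application Data Integrity) capable systems, the MCD memory tags are
 * copied along with the page contents using ASI_MCD_REAL loads/stores.
 */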
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);

	/* If this page has ADI enabled, copy over any ADI tags
	 * as well
	 */
	if (vma->vm_flags & VM_SPARC_ADI) {
		unsigned long pfrom, pto, i, adi_tag;

		pfrom = page_to_phys(from);
		pto = page_to_phys(to);

		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
			asm volatile("ldxa [%1] %2, %0\n\t"
					: "=r" (adi_tag)
					:  "r" (i), "i" (ASI_MCD_REAL));
			asm volatile("stxa %0, [%1] %2\n\t"
					:
					: "r" (adi_tag), "r" (pto),
					  "i" (ASI_MCD_REAL));
			pto += adi_blksize();
		}
		asm volatile("membar #Sync\n\t");
	}
}
EXPORT_SYMBOL(copy_user_highpage);
void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);

	/* If this platform is ADI enabled, copy any ADI tags
	 * as well
	 */
	if (adi_capable()) {
		unsigned long pfrom, pto, i, adi_tag;

		pfrom = page_to_phys(from);
		pto = page_to_phys(to);

		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
			asm volatile("ldxa [%1] %2, %0\n\t"
					: "=r" (adi_tag)
					:  "r" (i), "i" (ASI_MCD_REAL));
			asm volatile("stxa %0, [%1] %2\n\t"
					:
					: "r" (adi_tag), "r" (pto),
					  "i" (ASI_MCD_REAL));
			pto += adi_blksize();
		}
		asm volatile("membar #Sync\n\t");
	}
}
EXPORT_SYMBOL(copy_highpage);