/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
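/*
 * Illustrative expansion (editor's note, not from the original source):
 * on UP kernels a flush body written as
 *
 *	FLUSH_BEGIN(mm)
 *	srmmu_flush_whole_tlb();
 *	FLUSH_END
 *
 * expands to "if ((mm)->context != NO_CONTEXT) { srmmu_flush_whole_tlb(); }",
 * so address spaces that never received a hardware context are skipped;
 * on SMP both macros expand to nothing and the body always runs.
 */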
int flush_page_for_dma_global = 1;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
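/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 the shift is 8,
 * so one bitmap bit covers 1 << 8 == 256 bytes of nocache; at 4 bytes
 * per pte_t that is 64 PTEs per bit, matching the comment above.
 */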

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}
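
/*
 * Note (illustrative): both loops above fan one software pmd out over
 * PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE hardware page-table pointers, so
 * a single Linux PTE page backs several 64-entry hardware PTE tables;
 * each successive PTP advances by SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t)
 * bytes, expressed in the packed (physical address >> 4) format the
 * hardware expects.
 */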

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}

unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}
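
/*
 * Usage sketch (illustrative, mirrors callers later in this file): the
 * allocator hands out naturally aligned chunks, e.g. a page directory
 * that must be aligned to its own size:
 *
 *	pgd_t *pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE,
 *						  SRMMU_PGD_TABLE_SIZE);
 *
 * srmmu_get_nocache() is the zeroing wrapper around the same allocator.
 */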

void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically, proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
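
/*
 * Units note (illustrative, assuming probe_memory() reports bytes):
 * sysmemavail above is then in KB, so the pool scales as
 * sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256 pages before being
 * clamped to [SRMMU_MIN_NOCACHE_PAGES, SRMMU_MAX_NOCACHE_PAGES].
 */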

static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}

static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}

void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}
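
/*
 * Worked example (illustrative): a PTE stores PA >> 4, so tmp's bits
 * 31:28 carry physical address bits 35:32.  With a hypothetical
 * bus_type of 0xf the code above ORs 0xf << 28 into the entry,
 * extending the 32-bit physaddr to the 36-bit physical address the
 * comment describes.
 */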

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* These flush types are not available on all chips... */
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {

		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, 0);
	}
	return retval;
}
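
/*
 * Note (illustrative): the ASI_M_FLUSH_PROBE access returns the raw
 * page table entry the MMU resolves for vaddr, or 0 when there is no
 * translation; srmmu_inherit_prom_mappings() below relies on this both
 * to find PROM mappings and to detect their level by re-probing at
 * pmd- and pgd-sized strides.
 */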

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if (!(prompte = srmmu_probe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_probe((start - PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/* We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vbase;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static void __init map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]),
			       srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}
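
/*
 * Example output (illustrative values only) as it appears in
 * /proc/cpuinfo:
 *
 *	MMU type	: TI Tsunami
 *	contexts	: 256
 *	nocache total	: 1048576
 *	nocache used	: 24576
 */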

void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all = hypersparc_flush_cache_all,
	.cache_mm = hypersparc_flush_cache_mm,
	.cache_page = hypersparc_flush_cache_page,
	.cache_range = hypersparc_flush_cache_range,
	.tlb_all = hypersparc_flush_tlb_all,
	.tlb_mm = hypersparc_flush_tlb_mm,
	.tlb_page = hypersparc_flush_tlb_page,
	.tlb_range = hypersparc_flush_tlb_range,
	.page_to_ram = hypersparc_flush_page_to_ram,
	.sig_insns = hypersparc_flush_sig_insns,
	.page_for_dma = hypersparc_flush_page_for_dma,
};

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all = swift_flush_cache_all,
	.cache_mm = swift_flush_cache_mm,
	.cache_page = swift_flush_cache_page,
	.cache_range = swift_flush_cache_range,
	.tlb_all = swift_flush_tlb_all,
	.tlb_mm = swift_flush_tlb_mm,
	.tlb_page = swift_flush_tlb_page,
	.tlb_range = swift_flush_tlb_range,
	.page_to_ram = swift_flush_page_to_ram,
	.sig_insns = swift_flush_sig_insns,
	.page_for_dma = swift_flush_page_for_dma,
};

#define SWIFT_MASKID_ADDR  0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_probe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	/* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all = turbosparc_flush_cache_all,
	.cache_mm = turbosparc_flush_cache_mm,
	.cache_page = turbosparc_flush_cache_page,
	.cache_range = turbosparc_flush_cache_range,
	.tlb_all = turbosparc_flush_tlb_all,
	.tlb_mm = turbosparc_flush_tlb_mm,
	.tlb_page = turbosparc_flush_tlb_page,
	.tlb_range = turbosparc_flush_tlb_range,
	.page_to_ram = turbosparc_flush_page_to_ram,
	.sig_insns = turbosparc_flush_sig_insns,
	.page_for_dma = turbosparc_flush_page_for_dma,
};

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all = tsunami_flush_cache_all,
	.cache_mm = tsunami_flush_cache_mm,
	.cache_page = tsunami_flush_cache_page,
	.cache_range = tsunami_flush_cache_range,
	.tlb_all = tsunami_flush_tlb_all,
	.tlb_mm = tsunami_flush_tlb_mm,
	.tlb_page = tsunami_flush_tlb_page,
	.tlb_range = tsunami_flush_tlb_range,
	.page_to_ram = tsunami_flush_page_to_ram,
	.sig_insns = tsunami_flush_sig_insns,
	.page_for_dma = tsunami_flush_page_for_dma,
};

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		 * viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all = viking_flush_cache_all,
	.cache_mm = viking_flush_cache_mm,
	.cache_page = viking_flush_cache_page,
	.cache_range = viking_flush_cache_range,
	.tlb_all = viking_flush_tlb_all,
	.tlb_mm = viking_flush_tlb_mm,
	.tlb_page = viking_flush_tlb_page,
	.tlb_range = viking_flush_tlb_range,
	.page_to_ram = viking_flush_page_to_ram,
	.sig_insns = viking_flush_sig_insns,
	.page_for_dma = viking_flush_page_for_dma,
};

#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending.  It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
	.cache_all = viking_flush_cache_all,
	.cache_mm = viking_flush_cache_mm,
	.cache_page = viking_flush_cache_page,
	.cache_range = viking_flush_cache_range,
	.tlb_all = sun4dsmp_flush_tlb_all,
	.tlb_mm = sun4dsmp_flush_tlb_mm,
	.tlb_page = sun4dsmp_flush_tlb_page,
	.tlb_range = sun4dsmp_flush_tlb_range,
	.page_to_ram = viking_flush_page_to_ram,
	.sig_insns = viking_flush_sig_insns,
	.page_for_dma = viking_flush_page_for_dma,
};
#endif

static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to workaround the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		viking_ops.page_for_dma = viking_flush_page;
#ifdef CONFIG_SMP
		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
#endif
		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;
		srmmu_cache_pagetables = 1;
	}

	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&viking_ops;
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
			&viking_sun4d_smp_ops;
#endif

	poke_srmmu = poke_viking;
}

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;
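
	/*
	 * Worked example (illustrative): a PSR of 0x40000000 decodes to
	 * psr_typ == 4, psr_vers == 0, which the Viking test below
	 * accepts; psr_typ == 0 with psr_vers == 5 would be a TurboSparc.
	 */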

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/* Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}
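
/*
 * Note (illustrative): the pattern above and below is the same for every
 * op: xcN() issues a cross call that runs the local op on the other cpus
 * (N is the argument count), then the op is invoked directly for the
 * calling cpu, making the flush visible system wide.
 */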

static void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
		local_ops->cache_mm(mm);
	}
}

static void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_ops->tlb_mm(mm);
	}
}

static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}

static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}

static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}

static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the pages contents
	 * in kernel space, therefore we only run this on local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}

static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}

static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
	.cache_all = smp_flush_cache_all,
	.cache_mm = smp_flush_cache_mm,
	.cache_page = smp_flush_cache_page,
	.cache_range = smp_flush_cache_range,
	.tlb_all = smp_flush_tlb_all,
	.tlb_mm = smp_flush_tlb_mm,
	.tlb_page = smp_flush_tlb_page,
	.tlb_range = smp_flush_tlb_range,
	.page_to_ram = smp_flush_page_to_ram,
	.sig_insns = smp_flush_sig_insns,
	.page_for_dma = smp_flush_page_for_dma,
};
#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);

	/* Functions */
	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	local_ops = sparc32_cachetlb_ops;

	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
	}

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		smp_cachetlb_ops.cache_all = local_ops->cache_all;
		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
		smp_cachetlb_ops.cache_range = local_ops->cache_range;
		smp_cachetlb_ops.cache_page = local_ops->cache_page;

		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
	}

	/* It really is const after this point. */
	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&smp_cachetlb_ops;
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}