Merge branch 'mips-next' of http://dev.phrozen.org/githttp/mips-next into mips-for-linux-next
[platform/adaptation/renesas_rcar/renesas_kernel.git] / arch / mips / mm / tlbex.c
index 818c525..0561335 100644 (file)
@@ -158,7 +158,7 @@ enum label_id {
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
        label_large_segbits_fault,
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        label_tlb_huge_update,
 #endif
 };
@@ -177,7 +177,7 @@ UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
 UASM_L_LA(_large_segbits_fault)
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 UASM_L_LA(_tlb_huge_update)
 #endif
 
@@ -210,19 +210,59 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
 }
 
 /*
- * For debug purposes.
+ * pgtable bits are assigned dynamically depending on processor feature
+ * and statically based on kernel configuration.  This spits out the actual
+ * values the kernel is using.  Required to make sense from disassembled
+ * TLB exception handlers.
  */
-static inline void dump_handler(const u32 *handler, int count)
+static void output_pgtable_bits_defines(void)
+{
+#define pr_define(fmt, ...)                                    \
+       pr_debug("#define " fmt, ##__VA_ARGS__)
+
+       pr_debug("#include <asm/asm.h>\n");
+       pr_debug("#include <asm/regdef.h>\n");
+       pr_debug("\n");
+
+       pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
+       pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
+       pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+       pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
+       pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+       pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+       pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+#endif
+       if (cpu_has_rixi) {
+#ifdef _PAGE_NO_EXEC_SHIFT
+               pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+#endif
+#ifdef _PAGE_NO_READ_SHIFT
+               pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
+#endif
+       }
+       pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
+       pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
+       pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
+       pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+       pr_debug("\n");
+}
+
+static inline void dump_handler(const char *symbol, const u32 *handler, int count)
 {
        int i;
 
+       pr_debug("LEAF(%s)\n", symbol);
+
        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
 
        for (i = 0; i < count; i++)
-               pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
+               pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
+
+       pr_debug("\t.set\tpop\n");
 
-       pr_debug("\t.set pop\n");
+       pr_debug("\tEND(%s)\n", symbol);
 }
 
 /* The only general purpose registers allowed in TLB handlers. */
@@ -405,7 +445,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
 
        memcpy((void *)ebase, tlb_handler, 0x80);
 
-       dump_handler((u32 *)ebase, 32);
+       dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
 }
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
@@ -447,7 +487,6 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
-       case CPU_R5000A:
        case CPU_NEVADA:
                uasm_i_nop(p);
                uasm_i_tlbp(p);
@@ -521,7 +560,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                break;
 
        case CPU_R5000:
-       case CPU_R5000A:
        case CPU_NEVADA:
                uasm_i_nop(p); /* QED specifies 2 nops hazard */
                uasm_i_nop(p); /* QED specifies 2 nops hazard */
@@ -569,24 +607,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                tlbw(p);
                break;
 
-       case CPU_RM9000:
-               /*
-                * When the JTLB is updated by tlbwi or tlbwr, a subsequent
-                * use of the JTLB for instructions should not occur for 4
-                * cpu cycles and use for data translations should not occur
-                * for 3 cpu cycles.
-                */
-               uasm_i_ssnop(p);
-               uasm_i_ssnop(p);
-               uasm_i_ssnop(p);
-               uasm_i_ssnop(p);
-               tlbw(p);
-               uasm_i_ssnop(p);
-               uasm_i_ssnop(p);
-               uasm_i_ssnop(p);
-               uasm_i_ssnop(p);
-               break;
-
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
@@ -633,7 +653,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
        }
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 
 static __cpuinit void build_restore_pagemask(u32 **p,
                                             struct uasm_reloc **r,
@@ -759,7 +779,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
        build_huge_update_entries(p, pte, ptr);
        build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #ifdef CONFIG_64BIT
 /*
@@ -1204,7 +1224,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
        /* Adjust the context during the load latency. */
        build_adjust_context(p, tmp);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
        /*
         * The in the LWX case we don't want to do the load in the
@@ -1213,7 +1233,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
         */
        if (use_lwx_insns())
                uasm_i_nop(p);
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 
        /* build_update_entries */
@@ -1316,7 +1336,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
                build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
                build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
 #endif
 
@@ -1326,7 +1346,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
                uasm_l_leave(&l, p);
                uasm_i_eret(&p); /* return from trap */
        }
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        uasm_l_tlb_huge_update(&l, p);
        build_huge_update_entries(&p, htlb_info.huge_pte, K1);
        build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
@@ -1371,7 +1391,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
                uasm_copy_handler(relocs, labels, tlb_handler, p, f);
                final_len = p - tlb_handler;
        } else {
-#if defined(CONFIG_HUGETLB_PAGE)
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
                const enum label_id ls = label_tlb_huge_update;
 #else
                const enum label_id ls = label_vmalloc;
@@ -1440,7 +1460,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 
        memcpy((void *)ebase, final_handler, 0x100);
 
-       dump_handler((u32 *)ebase, 64);
+       dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
 }
 
 /*
@@ -1497,7 +1517,8 @@ static void __cpuinit build_r4000_setup_pgd(void)
        pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
                 (unsigned int)(p - tlbmiss_handler_setup_pgd));
 
-       dump_handler(tlbmiss_handler_setup_pgd,
+       dump_handler("tlbmiss_handler",
+                    tlbmiss_handler_setup_pgd,
                     ARRAY_SIZE(tlbmiss_handler_setup_pgd));
 }
 #endif
@@ -1767,7 +1788,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));
 
-       dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+       dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
 static void __cpuinit build_r3000_tlb_store_handler(void)
@@ -1797,7 +1818,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));
 
-       dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+       dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
 static void __cpuinit build_r3000_tlb_modify_handler(void)
@@ -1827,7 +1848,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbm));
 
-       dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+       dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
 
@@ -1846,7 +1867,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
        build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
 #endif
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /*
         * For huge tlb entries, pmd doesn't contain an address but
         * instead contains the tlb pte. Check the PAGE_HUGE bit and
@@ -1962,7 +1983,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
        build_make_valid(&p, &r, wr.r1, wr.r2);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /*
         * This is the entry point when build_r4000_tlbchange_handler_head
         * spots a huge page.
@@ -2034,7 +2055,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));
 
-       dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+       dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
 }
 
 static void __cpuinit build_r4000_tlb_store_handler(void)
@@ -2055,7 +2076,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
        build_make_write(&p, &r, wr.r1, wr.r2);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /*
         * This is the entry point when
         * build_r4000_tlbchange_handler_head spots a huge page.
@@ -2081,7 +2102,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));
 
-       dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+       dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
 }
 
 static void __cpuinit build_r4000_tlb_modify_handler(void)
@@ -2103,7 +2124,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
        build_make_write(&p, &r, wr.r1, wr.r2);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /*
         * This is the entry point when
         * build_r4000_tlbchange_handler_head spots a huge page.
@@ -2129,7 +2150,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbm));
 
-       dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+       dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
 }
 
 void __cpuinit build_tlb_refill_handler(void)
@@ -2141,6 +2162,8 @@ void __cpuinit build_tlb_refill_handler(void)
         */
        static int run_once = 0;
 
+       output_pgtable_bits_defines();
+
 #ifdef CONFIG_64BIT
        check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
 #endif