case POWERPC_MMU_2_06a:
case POWERPC_MMU_2_07:
case POWERPC_MMU_2_07a:
- env->tlb_need_flush = 0;
#endif /* defined(TARGET_PPC64) */
+ env->tlb_need_flush = 0;
tlb_flush(CPU(cpu), 1);
break;
default:
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
- CPUState *cs;
-
addr &= TARGET_PAGE_MASK;
switch (env->mmu_model) {
case POWERPC_MMU_SOFT_6xx:
break;
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
- /* tlbie invalidate TLBs for all segments */
- addr &= ~((target_ulong)-1ULL << 28);
- cs = CPU(cpu);
- /* XXX: this case should be optimized,
- * giving a mask to tlb_flush_page
- */
- /* This is broken, some CPUs invalidate a whole congruence
- * class on an even smaller subset of bits and some OSes take
- * advantage of this. Just blow the whole thing away.
+ /* Actual CPUs invalidate entire congruence classes based on the
+ * geometry of their TLBs, and some OSes take advantage of that;
+ * we just mark the TLB as needing a flush later (at the next
+ * context synchronizing event, or any sync instruction on 32-bit).
*/
-#if 0
- tlb_flush_page(cs, addr | (0x0 << 28));
- tlb_flush_page(cs, addr | (0x1 << 28));
- tlb_flush_page(cs, addr | (0x2 << 28));
- tlb_flush_page(cs, addr | (0x3 << 28));
- tlb_flush_page(cs, addr | (0x4 << 28));
- tlb_flush_page(cs, addr | (0x5 << 28));
- tlb_flush_page(cs, addr | (0x6 << 28));
- tlb_flush_page(cs, addr | (0x7 << 28));
- tlb_flush_page(cs, addr | (0x8 << 28));
- tlb_flush_page(cs, addr | (0x9 << 28));
- tlb_flush_page(cs, addr | (0xA << 28));
- tlb_flush_page(cs, addr | (0xB << 28));
- tlb_flush_page(cs, addr | (0xC << 28));
- tlb_flush_page(cs, addr | (0xD << 28));
- tlb_flush_page(cs, addr | (0xE << 28));
- tlb_flush_page(cs, addr | (0xF << 28));
-#else
- tlb_flush(cs, 1);
-#endif
+ env->tlb_need_flush = 1;
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
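
A note on the consumer side, which is not visible in this hunk: the flag set
here is drained by the helper that the translator arms at sync points (see
gen_helper_check_tlb_flush further down). A minimal sketch of what that
helper presumably looks like, reusing only the tlb_flush() and
ppc_env_get_cpu() calls already present in the hunks above:

    void check_tlb_flush(CPUPPCState *env)
    {
        if (env->tlb_need_flush) {
            /* Clear the flag before flushing so the check is one-shot */
            env->tlb_need_flush = 0;
            /* Full flush (flush_global == 1), as in the code above */
            tlb_flush(CPU(ppc_env_get_cpu(env)), 1);
        }
    }
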
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
- PowerPCCPU *cpu = ppc_env_get_cpu(env);
-
qemu_log_mask(CPU_LOG_MMU,
"%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
(int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
uint64_t esid, vsid;
/* ESID = srnum */
}
}
#else
- tlb_flush(CPU(cpu), 1);
+ env->tlb_need_flush = 1;
#endif
}
}
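
Why the deferral pays off in helper_store_sr: on a 32-bit address-space
switch the guest kernel typically rewrites all 16 segment registers back to
back, and each store previously cost a full tlb_flush(). An illustrative
sketch of that guest pattern (hypothetical pseudo-code, not from the patch):

    for (i = 0; i < 16; i++)
        mtsr(i, new_vsid[i]);  /* before: one full TLB flush per store */
    sync();                    /* after: one deferred flush drains here */
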
uint32_t exception;
/* Routine used to access memory */
bool pr, hv;
+ bool lazy_tlb_flush;
int mem_idx;
int access_type;
/* Translation flags */
{
}
-#if !defined(CONFIG_USER_ONLY) && defined(TARGET_PPC64)
+#if !defined(CONFIG_USER_ONLY)
static inline void gen_check_tlb_flush(DisasContext *ctx)
{
- TCGv_i32 t = tcg_temp_new_i32();
- TCGLabel *l = gen_new_label();
+ TCGv_i32 t;
+ TCGLabel *l;
+ if (!ctx->lazy_tlb_flush) {
+ return;
+ }
+ l = gen_new_label();
+ t = tcg_temp_new_i32();
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
gen_helper_check_tlb_flush(cpu_env);
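
For reference, the TCG emitted here is equivalent to the following C,
inlined at each qualifying sync point (helper_check_tlb_flush() is assumed
to do the flush-and-clear sketched earlier):

    if (env->tlb_need_flush) {
        helper_check_tlb_flush(env);
    }

The fast path is a single load and a not-taken branch, which is why the
check can be armed on every 32-bit sync at little cost.
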
uint32_t l = (ctx->opcode >> 21) & 3;
/*
- * For l == 2, it's a ptesync, We need to check for a pending TLB flush.
- * This can only happen in kernel mode however so check MSR_PR as well.
+ * We may need to check for a pending TLB flush.
+ *
+ * We do this on ptesync (l == 2) on ppc64 and on any sync on ppc32.
+ *
+ * Additionally, this can only happen in kernel mode, so check
+ * MSR_PR as well.
*/
- if (l == 2 && !ctx->pr) {
+ if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
gen_check_tlb_flush(ctx);
}
}
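
The combined condition, written as a hypothetical standalone predicate
purely to summarize (not part of the patch):

    static bool sync_needs_tlb_check(uint32_t l, uint64_t insns_flags,
                                     bool pr)
    {
        /* ppc64: only ptesync (L == 2); ppc32: every sync. User mode
         * (pr) never needs the check, since tlbie and mtsr are
         * privileged instructions. */
        return ((l == 2) || !(insns_flags & PPC_64B)) && !pr;
    }
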
ctx.sf_mode = msr_is_64bit(env, env->msr);
ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
+ if (env->mmu_model == POWERPC_MMU_32B ||
+ env->mmu_model == POWERPC_MMU_601 ||
+ (env->mmu_model & POWERPC_MMU_64)) {
+ ctx.lazy_tlb_flush = true;
+ }
+
ctx.fpu_enabled = msr_fp;
if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
ctx.spe_enabled = msr_spe;