From: KONRAD Frederic
Date: Thu, 27 Oct 2016 15:10:06 +0000 (+0100)
Subject: tcg: protect translation related stuff with tb_lock.
X-Git-Tag: TizenStudio_2.0_p2.3.2~9^2~14^2~5^2~91^2~6
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a5e998262fd76fd4b3e537db77bfb8a396bfae69;p=sdk%2Femulator%2Fqemu.git

tcg: protect translation related stuff with tb_lock.

This protects all translation related work with tb_lock() to ensure
thread safety. This effectively serialises all code generation.

In addition to the code generation we also take the lock for TB
invalidation. This has the knock-on effect that tb_lock() is held for
modification of the SoftMMU TLB by non-self threads, which will be used
in later patches.

Signed-off-by: KONRAD Frederic
Message-Id: <1439220437-23957-8-git-send-email-fred.konrad@greensocs.com>
Signed-off-by: Emilio G. Cota
Signed-off-by: Paolo Bonzini
[AJB: moved into tree, clean-up history]
Signed-off-by: Alex Bennée
Reviewed-by: Richard Henderson
Message-Id: <20161027151030.20863-10-alex.bennee@linaro.org>
Signed-off-by: Paolo Bonzini
---

diff --git a/cpu-exec.c b/cpu-exec.c
index 4879c7d..e9b50a6 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -211,15 +211,21 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;
 
+    tb_lock();
     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles | CF_NOCACHE
                          | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
     tb->orig_tb = orig_tb;
+    tb_unlock();
+
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);
     cpu_tb_exec(cpu, tb);
+
+    tb_lock();
     tb_phys_invalidate(tb, -1);
     tb_free(tb);
+    tb_unlock();
 }
 #endif
 
diff --git a/exec.c b/exec.c
index 4c84389..ab30629 100644
--- a/exec.c
+++ b/exec.c
@@ -2064,6 +2064,12 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
                     continue;
                 }
                 cpu->watchpoint_hit = wp;
+
+                /* The tb_lock will be reset when cpu_loop_exit or
+                 * cpu_loop_exit_noexc longjmp back into the cpu_exec
+                 * main loop.
+                 */
+                tb_lock();
                 tb_check_watchpoint(cpu);
                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                     cpu->exception_index = EXCP_DEBUG;
diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c
index 74a549b..4448253 100644
--- a/hw/i386/kvmvapic.c
+++ b/hw/i386/kvmvapic.c
@@ -17,6 +17,7 @@
 #include "sysemu/kvm.h"
 #include "hw/i386/apic_internal.h"
 #include "hw/sysbus.h"
+#include "tcg/tcg.h"
 
 #define VAPIC_IO_PORT 0x7e
 
@@ -449,6 +450,9 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
     resume_all_vcpus();
 
     if (!kvm_enabled()) {
+        /* tb_lock will be reset when cpu_loop_exit_noexc longjmps
+         * back into the cpu_exec loop. */
+        tb_lock();
         tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
         cpu_loop_exit_noexc(cs);
     }
diff --git a/translate-all.c b/translate-all.c
index 3ff43ec..874f415 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -363,7 +363,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
 {
     TranslationBlock *tb;
+    bool r = false;
 
+    tb_lock();
     tb = tb_find_pc(retaddr);
     if (tb) {
         cpu_restore_state_from_tb(cpu, tb, retaddr);
@@ -372,9 +374,11 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
             tb_phys_invalidate(tb, -1);
             tb_free(tb);
         }
-        return true;
+        r = true;
     }
-    return false;
+    tb_unlock();
+
+    return r;
 }
 
 void page_size_init(void)
@@ -1456,6 +1460,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
     /* we remove all the TBs in the range [start, end[ */
     /* XXX: see if in some cases it could be faster
        to invalidate all the code */
+    tb_lock();
     tb = p->first_tb;
     while (tb != NULL) {
         n = (uintptr_t)tb & 3;
@@ -1515,6 +1520,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
         cpu_loop_exit_noexc(cpu);
     }
 #endif
+    tb_unlock();
 }
 
 #ifdef CONFIG_SOFTMMU
@@ -1584,6 +1590,8 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
     if (!p) {
         return false;
     }
+
+    tb_lock();
     tb = p->first_tb;
 #ifdef TARGET_HAS_PRECISE_SMC
     if (tb && pc != 0) {
@@ -1621,9 +1629,13 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
            modifying the memory. It will ensure that it cannot
           modify itself */
         tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
+         * back into the cpu_exec loop. */
         return true;
     }
 #endif
+    tb_unlock();
+
     return false;
 }
 #endif
@@ -1718,6 +1730,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     target_ulong pc, cs_base;
     uint32_t flags;
 
+    tb_lock();
     tb = tb_find_pc(retaddr);
     if (!tb) {
         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
@@ -1769,11 +1782,16 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     /* FIXME: In theory this could raise an exception.  In practice
        we have already translated the block once so it's probably ok.  */
     tb_gen_code(cpu, pc, cs_base, flags, cflags);
+
     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
-       the first in the TB) then we end up generating a whole new TB and
-       repeating the fault, which is horribly inefficient.
-       Better would be to execute just this insn uncached, or generate a
-       second new TB. */
+     * the first in the TB) then we end up generating a whole new TB and
+     * repeating the fault, which is horribly inefficient.
+     * Better would be to execute just this insn uncached, or generate a
+     * second new TB.
+     *
+     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
+     * tb_lock gets reset.
+     */
     cpu_loop_exit_noexc(cpu);
 }
 
@@ -1837,6 +1855,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     TranslationBlock *tb;
     struct qht_stats hst;
 
+    tb_lock();
+
     target_code_size = 0;
     max_target_code_size = 0;
     cross_page = 0;
@@ -1898,6 +1918,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
                 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
     tcg_dump_info(f, cpu_fprintf);
+
+    tb_unlock();
 }
 
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
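
The locking rule the patch establishes is uniform across the call sites above: any
thread that generates or invalidates translated code holds tb_lock(), so code
generation is fully serialised, and the paths that longjmp back into cpu_exec leave
the lock to be reset by the main loop. Below is a minimal standalone sketch of that
serialisation pattern only, using pthreads and hypothetical names (tb_lock_impl,
gen_code, invalidate, vcpu_thread); it is not QEMU code and says nothing about the
actual data structures tb_lock() guards in the tree.

/* Standalone sketch (not QEMU code): "code generation" and "invalidation"
 * are serialised behind a single mutex, mirroring the tb_lock()/tb_unlock()
 * bracketing added by the patch above.  All names here are illustrative. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tb_lock_impl = PTHREAD_MUTEX_INITIALIZER;
static long generated;      /* stand-ins for shared translation state */
static long invalidated;

static void gen_code(void)               /* plays the role of tb_gen_code() */
{
    pthread_mutex_lock(&tb_lock_impl);   /* tb_lock()   */
    generated++;                         /* touch shared state */
    pthread_mutex_unlock(&tb_lock_impl); /* tb_unlock() */
}

static void invalidate(void)             /* plays the role of tb_phys_invalidate() */
{
    pthread_mutex_lock(&tb_lock_impl);
    invalidated++;
    pthread_mutex_unlock(&tb_lock_impl);
}

static void *vcpu_thread(void *arg)      /* one "vCPU" generating and discarding TBs */
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        gen_code();
        invalidate();
    }
    return NULL;
}

int main(void)
{
    pthread_t threads[4];

    for (int i = 0; i < 4; i++) {
        pthread_create(&threads[i], NULL, vcpu_thread, NULL);
    }
    for (int i = 0; i < 4; i++) {
        pthread_join(threads[i], NULL);
    }
    /* With the lock held around every update, both counters reach 400000. */
    printf("generated=%ld invalidated=%ld\n", generated, invalidated);
    return 0;
}

Built with e.g. cc -std=c99 -pthread, the four threads always finish with both
counters at 400000; removing the mutex calls reintroduces exactly the kind of race
on shared translation state that tb_lock() is meant to exclude.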