/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory-internal.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "sysemu/hax.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
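/* Note on units: VM_CLOCK_ADVANCE and MAX_DELAY_PRINT_RATE are in
 * nanoseconds (3 ms and 2 s respectively); THRESHOLD_REDUCE is in
 * seconds, matching the diff_clk / 1e9 conversions in print_delay()
 * below. */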
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }
    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;
    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
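/* Worked example (illustrative numbers, not from this file): with an
 * icount shift of 0, one guest instruction accounts for 1 ns of virtual
 * time.  A TB run that retires 4,000,000 instructions therefore grows
 * diff_clk by 4 ms, which exceeds VM_CLOCK_ADVANCE (3 ms), so
 * align_clocks() sleeps off the surplus and the two clocks reconverge. */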
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
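/* The two printf() arguments bracket the measured delay within a
 * one-second window.  threshold_delay only moves when the delay leaves
 * [threshold_delay - THRESHOLD_REDUCE, threshold_delay], so a guest
 * whose lag is stable produces no repeated warnings. */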
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */
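/* cpu_loop_exit() never returns: it unwinds with siglongjmp() to the
 * sigsetjmp() in cpu_exec(), so callers must store whatever state they
 * need (e.g. cpu->exception_index) in the CPUState beforehand. */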
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
void cpu_reload_memory_map(CPUState *cpu)
{
    AddressSpaceDispatch *d;

    if (qemu_in_vcpu_thread()) {
        /* Do not let the guest prolong the critical section as much as it
         * can.
         *
         * Currently, this is prevented by the I/O thread's periodic kicking
         * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
         * but this will go away once TCG's execution moves out of the global
         * mutex.
         *
         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
         * only protects cpu->as->dispatch. Since we reload it below, we can
         * split the critical section.
         */
        rcu_read_unlock();
        rcu_read_lock();
    }

    /* The CPU and TLB are protected by the iothread lock. */
    d = atomic_rcu_read(&cpu->as->dispatch);
    cpu->memory_dispatch = d;
    tlb_flush(cpu, 1);
}
#endif /* CONFIG_SOFTMMU */
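/* The value returned by tcg_qemu_tb_exec() packs a TranslationBlock
 * pointer with an exit reason in its low bits (TB_EXIT_MASK);
 * cpu_tb_exec() below decodes and acts on both parts. */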
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *)(next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb)
{
    TranslationBlock *tb;
    target_ulong pc = orig_tb->pc;
    target_ulong cs_base = orig_tb->cs_base;
    uint64_t flags = orig_tb->flags;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    /* tb_gen_code can flush our orig_tb, invalidate it now */
    tb_phys_invalidate(orig_tb, -1);
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     max_cycles | CF_NOCACHE);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
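/* CF_NOCACHE marks the block as single-use: it is kept out of the normal
 * lookup structures, which is why it can be invalidated and freed
 * immediately after the one execution above. */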
static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
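/* The move-to-front step above is a self-organizing-list heuristic: hot
 * blocks migrate toward the head of their physical hash bucket, keeping
 * the linear scan in tb_find_slow() short on average. */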
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}
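/* Lookup is thus two-level: tb_jmp_cache is a direct-mapped cache indexed
 * by a hash of the virtual PC, and on a miss tb_find_slow() falls back to
 * the physically indexed hash table before translating from scratch. */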
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_rmb();

    rcu_read_lock();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            if (hax_enabled() && !hax_vcpu_exec(cpu)) {
                longjmp(cpu->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
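                    /* The DEBUG/HALT (and, below, INIT) cases all leave
                     * through cpu_loop_exit(), i.e. a siglongjmp back to
                     * the sigsetjmp above with cpu->exception_index set;
                     * the pending-exception code at the top of the outer
                     * loop then turns that into a return value. */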
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       and a longjmp through cpu_loop_exit.  */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
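                /* TB chaining: tb_add_jump() patches the direct-jump slot
                 * (index next_tb & TB_EXIT_MASK) of the previously executed
                 * TB so it branches straight into the new TB's code,
                 * bypassing this lookup on later iterations.  TBs spanning
                 * two guest pages are excluded because the second page's
                 * mapping can change independently of the first. */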
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
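                /* icount bookkeeping: the 16-bit run budget in
                 * icount_decr.u16.low is refilled from icount_extra in
                 * chunks of at most 0xffff instructions.  A negative
                 * icount_decr.u32 means the high half was set to force an
                 * exit; when no refill is possible, any remaining
                 * instructions run uncached and we leave via
                 * cpu_loop_exit(). */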
                cpu->current_tb = NULL;

                if (hax_enabled() && hax_stop_emulation(cpu)) {
                    cpu_loop_exit(cpu);
                }

                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
            cpu->can_do_io = 1;
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}