/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

#include "sysemu/hax.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
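
/* align_clocks keeps the guest from running ahead of the host when
 * -icount align is enabled: diff_clk accumulates how far (in ns) the
 * guest's virtual clock is ahead of the host's realtime clock, and once
 * it exceeds VM_CLOCK_ADVANCE the surplus is slept off. A negative
 * diff_clk means the guest is late; that case is only reported by
 * print_delay() below, never compensated here.
 */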
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
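
/* Note on the return value of cpu_tb_exec: tcg_qemu_tb_exec returns the
 * address of the last translated block that was executed, with the low
 * TB_EXIT_MASK bits encoding why control came back (goto_tb slot 0 or 1,
 * TB_EXIT_REQUESTED, or TB_EXIT_ICOUNT_EXPIRED). The caller uses the
 * pointer part for chaining and the index part to pick the jump slot
 * to patch.
 */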
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [" TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
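
/* cpu_exec_nocache below is the slow path used when execution must stop
 * after an exact number of guest instructions (icount expiry, replay):
 * a one-shot TB is generated with precisely the remaining cycle budget,
 * run once, then invalidated and freed.
 */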
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
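
/* TB lookup is two-level: tb_find_fast probes the per-CPU virtual-PC jump
 * cache (tb_jmp_cache); on a miss, tb_find_physical walks the global hash
 * table keyed on the guest-physical PC, so aliased virtual mappings of the
 * same physical code share one translation. A hit is moved to the front of
 * its hash chain to speed up subsequent lookups.
 */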
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            return NULL;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                             TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            } else {
                break;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}
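
/* tb_find_slow retries the physical lookup and, only if the code really has
 * not been translated yet, calls tb_gen_code. Either way the result is
 * installed in the virtual-PC jump cache so tb_find_fast hits next time.
 */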
static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock. Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
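
/* The (pc, cs_base, flags) triple obtained from cpu_get_tb_cpu_state
 * captures every piece of CPU state that the translator bakes into the
 * generated code (e.g. privilege level, MMU index, mode bits); any two
 * states with an identical triple can safely share a translation.
 */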
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
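
/* cpu_exec is built from two nested loops. The outer loop re-arms the
 * sigsetjmp context and services any pending exception; helpers that must
 * abandon a half-executed TB call cpu_loop_exit(), which siglongjmps back
 * here. The inner loop services interrupt requests, looks up or translates
 * the next TB, chains it to its predecessor when safe, and executes it.
 */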
/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uintptr_t next_tb;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();
    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    if (replay_exception()) {
                        cc->do_interrupt(cpu);
                        cpu->exception_index = -1;
                    } else if (!replay_has_interrupt()) {
                        /* give a chance to the iothread in replay mode */
                        ret = EXCP_INTERRUPT;
                        break;
                    }
#endif
                }
            } else if (replay_has_exception()
                       && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                /* try to cause an exception pending in the log */
                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
                ret = -1;
                break;
            }

            if (hax_enabled() && !hax_vcpu_exec(cpu)) {
                longjmp(cpu->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
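            /* next_tb carries the address of the previously executed TB
             * with the exit-slot index in its low bits; zero means "do not
             * chain", either because there is no predecessor yet or because
             * chaining was suppressed above.
             */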
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (replay_mode == REPLAY_MODE_PLAY
                        && !replay_has_interrupt()) {
                        /* Do nothing */
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        replay_interrupt();
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    else if (interrupt_request & CPU_INTERRUPT_INIT) {
                        replay_interrupt();
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    else if (interrupt_request & CPU_INTERRUPT_RESET) {
                        replay_interrupt();
                        cpu_reset(cpu);
                        cpu_loop_exit(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       or it exits via longjmp through cpu_loop_exit. */
                    else {
                        replay_interrupt();
                        if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                            next_tb = 0;
                        }
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request
                             || replay_has_interrupt())) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb_lock();
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                tb_unlock();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tb);
                    cpu->current_tb = NULL;
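                    /* Dispatch on why the generated code gave control back:
                     * the low bits of next_tb distinguish a normal exit
                     * from an asynchronous exit request or an exhausted
                     * instruction budget.
                     */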
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop. But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb, false);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
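                /* Worked example for the refill above: icount_decr is a
                 * union whose 32-bit view goes negative once the 16-bit
                 * budget is spent. If 70000 guest instructions are still
                 * owed, the refill loads icount_decr.u16.low with
                 * MIN(0xffff, 70000) = 65535 and leaves the remaining
                 * 4465 in icount_extra.
                 */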
                if (hax_enabled() && hax_stop_emulation(cpu)) {
                    cpu_loop_exit(cpu);
                }

                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code
             * (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
            cpu->can_do_io = 1;
            tb_lock_reset();
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay. */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}