/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"
/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
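
/* Sleep on the host until the guest's icount-derived clock no longer runs
 * ahead of the host real-time clock; called after every executed TB when
 * -icount align is enabled. */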
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
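
/* Rate-limited warning, printed at most every MAX_DELAY_PRINT_RATE ns and
 * at most MAX_NB_PRINTS times, when the guest falls behind the host
 * real-time clock under -icount align. */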
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
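
/* Capture the initial host/guest clock difference before the execution
 * loop starts and fold it into the max_delay/max_advance statistics. */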
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG USER ONLY */
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [" TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    if (tb_exit == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return ret;
}
#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    bool old_tb_flushed;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    old_tb_flushed = cpu->tb_flushed;
    cpu->tb_flushed = false;
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
    cpu->tb_flushed |= old_tb_flushed;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
#endif
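
/* Lookup key passed to the QHT-based physical TB hash table; candidate
 * TBs are matched against it by tb_cmp() below. */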
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
};
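
/* QHT comparison callback: returns true when the candidate TB matches the
 * lookup descriptor, checking the second physical page for TBs that span
 * two guest pages. */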
static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}
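
/* Slow-path lookup in the global physical-PC hash table, used when the
 * per-CPU tb_jmp_cache misses. */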
static TranslationBlock *tb_htable_lookup(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
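
/* Find a TB matching the current CPU state: try the virtual-PC jump cache
 * first, fall back to the physical hash table, and translate a new TB if
 * neither holds one; optionally chain it to the previously executed TB. */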
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {
            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        /* Check if translation buffer has been flushed */
        if (cpu->tb_flushed) {
            cpu->tb_flushed = false;
        } else if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}
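
/* Returns true when the CPU is halted and still has no pending work, in
 * which case the caller leaves cpu_exec() right away; otherwise clears the
 * halted state and lets execution proceed. */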
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}
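
/* On a debug exception, clear stale watchpoint hit flags and hand control
 * to the per-target debug exception handler. */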
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
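
/* Deal with a pending exception; returns true when cpu_exec() should stop
 * looping and return *ret to its caller. */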
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* if user mode only, we simulate a fake exception
               which will be handled outside the cpu execution
               loop */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                cc->do_interrupt(cpu);
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}
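
/* Check for pending interrupt requests and let the target hook service
 * them; may leave the execution loop through cpu_loop_exit(). */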
static inline void cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int interrupt_request = cpu->interrupt_request;

    if (unlikely(interrupt_request)) {
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            cpu_loop_exit(cpu);
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            cpu_loop_exit(cpu);
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            cpu_loop_exit(cpu);
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            cpu_loop_exit(cpu);
        }
#endif
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and via longjmp via cpu_loop_exit. */
        else {
            replay_interrupt();
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }
    }
    if (unlikely(cpu->exit_request || replay_has_interrupt())) {
        cpu->exit_request = 0;
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }
}
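
/* Run one TB and decode its exit status; refill the icount decrementer or
 * fall back to cpu_exec_nocache() when the instruction budget expires. */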
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit,
                                    SyncClocks *sc)
{
    uintptr_t ret;

    if (unlikely(cpu->exit_request)) {
        return;
    }

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    switch (*tb_exit) {
    case TB_EXIT_REQUESTED:
        /* Something asked us to stop executing
         * chained TBs; just continue round the main
         * loop. Whatever requested the exit will also
         * have set something else (eg exit_request or
         * interrupt_request) which we will handle
         * next time around the loop.  But we need to
         * ensure the tcg_exit_req read in generated code
         * comes before the next read of cpu->exit_request
         * or cpu->interrupt_request.
         */
        smp_rmb();
        *last_tb = NULL;
        break;
    case TB_EXIT_ICOUNT_EXPIRED:
    {
        /* Instruction counter expired.  */
#ifdef CONFIG_USER_ONLY
        abort();
#else
        int insns_left = cpu->icount_decr.u32;
        if (cpu->icount_extra && insns_left >= 0) {
            /* Refill decrementer and continue execution.  */
            cpu->icount_extra += insns_left;
            insns_left = MIN(0xffff, cpu->icount_extra);
            cpu->icount_extra -= insns_left;
            cpu->icount_decr.u16.low = insns_left;
        } else {
            if (insns_left > 0) {
                /* Execute remaining instructions.  */
                cpu_exec_nocache(cpu, insns_left, *last_tb, false);
                align_clocks(sc, cpu);
            }
            cpu->exception_index = EXCP_INTERRUPT;
            cpu_loop_exit(cpu);
        }
        break;
#endif
    }
    default:
        break;
    }
}
/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    for (;;) {
        /* prepare setjmp context for exception handling */
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            TranslationBlock *tb, *last_tb = NULL;
            int tb_exit = 0;

            /* if an exception is pending, we execute it here */
            if (cpu_handle_exception(cpu, &ret)) {
                break;
            }

            atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
            for (;;) {
                cpu_handle_interrupt(cpu, &last_tb);
                tb = tb_find(cpu, last_tb, tb_exit);
                cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
            }
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}