/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"
#include "sysemu/hax.h"
/* -icount align implementation. */
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100
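
/* align_clocks() is called after every executed TB when -icount align is
 * enabled: it converts the instructions executed since the last call into
 * nanoseconds and, whenever the guest clock has run more than
 * VM_CLOCK_ADVANCE ahead of the host real-time clock, sleeps until the two
 * are back in sync.
 */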
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        (sc->realtime_clock - last_realtime_clock) >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
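
/* init_delay_params() records the initial guest/host clock offset; the
 * max_delay and max_advance counters (defined outside this file) keep the
 * most extreme offsets observed, for the -icount align statistics.
 */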
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
/* Execute a TB, and fix up the CPU state afterwards if necessary */
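/* The value returned by tcg_qemu_tb_exec() (and therefore by this function)
 * is the address of the last TranslationBlock that was executed, with the
 * exit condition encoded in its two low bits (TB_EXIT_MASK).
 */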
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc != NULL);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    if (tb_exit == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        atomic_set(&cpu->tcg_exit_req, 0);
    }
    return ret;
}
#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}
#endif
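
/* cpu_exec_step() translates a single guest instruction into a throw-away
 * (CF_NOCACHE) TB and executes it.  cpu_exec_step_atomic() runs that step
 * inside start_exclusive()/end_exclusive() with parallel_cpus temporarily
 * cleared, so no other vCPU executes concurrently; this is the slow path
 * used to service EXCP_ATOMIC requests from the translator.
 */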
static void cpu_exec_step(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
    tb->orig_tb = NULL;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, pc);
    cpu_tb_exec(cpu, tb);
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
void cpu_exec_step_atomic(CPUState *cpu)
{
    start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true.  */
    parallel_cpus = false;
    cpu_exec_step(cpu);
    parallel_cpus = true;

    end_exclusive();
}
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
};
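
/* Translated blocks are kept in a global qht hash table
 * (tcg_ctx.tb_ctx.htable) indexed by a hash of physical PC, virtual PC and
 * flags; tb_cmp() is the match callback used during lookup.
 */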
static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}
static TranslationBlock *tb_htable_lookup(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {
            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks, so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}
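
/* Returns true if the vCPU is halted and has no pending work, in which case
 * cpu_exec() returns EXCP_HALTED without entering the execution loop.
 */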
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}
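
/* Returns true when cpu_exec() should break out of its main loop with the
 * value stored in *ret; returns false when execution can simply continue.
 */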
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* if user mode only, we simulate a fake exception
               which will be handled outside the cpu execution
               loop */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                cc->do_interrupt(cpu);
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to the iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}
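
/* Check for and dispatch any pending interrupt.  Most cases that actually
 * take an interrupt leave via cpu_loop_exit(), i.e. siglongjmp() back to the
 * sigsetjmp point in cpu_exec(); the cc->cpu_exec_interrupt() hook case
 * returns normally and just forces a new TB lookup by clearing *last_tb.
 */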
static inline void cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int interrupt_request = cpu->interrupt_request;

    if (unlikely(interrupt_request)) {
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            cpu_loop_exit(cpu);
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            cpu_loop_exit(cpu);
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            cpu_loop_exit(cpu);
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            cpu_loop_exit(cpu);
        }
#endif
        /* The target hook has three exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           or it may leave via longjmp through cpu_loop_exit().  */
        else {
            replay_interrupt();
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }
    }
    if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }
}
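
/* Execute the given TB (and whatever it chains to) and report the last
 * executed TB and the exit code back to the caller through *last_tb and
 * *tb_exit.
 */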
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit,
                                    SyncClocks *sc)
{
    uintptr_t ret;

    if (unlikely(atomic_read(&cpu->exit_request))) {
        return;
    }

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    switch (*tb_exit) {
    case TB_EXIT_REQUESTED:
        /* Something asked us to stop executing
         * chained TBs; just continue round the main
         * loop. Whatever requested the exit will also
         * have set something else (e.g. exit_request or
         * interrupt_request) which we will handle
         * next time around the loop.  But we need to
         * ensure the tcg_exit_req read in generated code
         * comes before the next read of cpu->exit_request
         * or cpu->interrupt_request.
         */
        smp_rmb();
        *last_tb = NULL;
        break;
    case TB_EXIT_ICOUNT_EXPIRED:
    {
        /* Instruction counter expired.  */
#ifdef CONFIG_USER_ONLY
        abort();
#else
        int insns_left = cpu->icount_decr.u32;
        if (cpu->icount_extra && insns_left >= 0) {
            /* Refill decrementer and continue execution.  */
            cpu->icount_extra += insns_left;
            insns_left = MIN(0xffff, cpu->icount_extra);
            cpu->icount_extra -= insns_left;
            cpu->icount_decr.u16.low = insns_left;
        } else {
            if (insns_left > 0) {
                /* Execute remaining instructions.  */
                cpu_exec_nocache(cpu, insns_left, *last_tb, false);
                align_clocks(sc, cpu);
            }
            cpu->exception_index = EXCP_INTERRUPT;
            cpu_loop_exit(cpu);
        }
        *last_tb = NULL;
        break;
#endif
    }
    default:
        break;
    }
}
/* main execution loop */
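/* Structure: the outer for(;;) is re-entered whenever a siglongjmp (via
 * cpu_loop_exit() or an exception raised from a helper) unwinds back to the
 * sigsetjmp point; its only normal exit is the break taken when
 * cpu_handle_exception() returns true.  The inner for(;;) handles pending
 * interrupts, finds or translates the next TB and executes it.
 */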
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);
    for(;;) {
        /* prepare setjmp context for exception handling */
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            TranslationBlock *tb, *last_tb = NULL;
            int tb_exit = 0;

            /* if an exception is pending, we execute it here */
            if (cpu_handle_exception(cpu, &ret)) {
                break;
            }
            if (hax_enabled() && !hax_vcpu_exec(cpu)) {
                siglongjmp(cpu->jmp_env, 1);
            }
            for(;;) {
                cpu_handle_interrupt(cpu, &last_tb);
                tb = tb_find(cpu, last_tb, tb_exit);
                cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
                if (hax_enabled() && hax_stop_emulation(cpu)) {
                    cpu_loop_exit(cpu);
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
            }
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
            cpu->can_do_io = 1;
            tb_lock_reset();
        }
    } /* for(;;) */
    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}