/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
24 #include "qemu-barrier.h"
/* Set while generating code when an existing TranslationBlock had to be
 * invalidated; tb_find_slow() clears it before a lookup and cpu_exec()
 * consumes it to drop a now-stale direct-jump chain (see the
 * tb_invalidated_flag check after tb_find_fast()).
 * NOTE(review): restored from a garbled extraction in which the original
 * file's line numbers were fused into the text. */
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
31 bool qemu_cpu_has_work(CPUArchState *env)
33 return cpu_has_work(env);
36 void cpu_loop_exit(CPUArchState *env)
38 env->current_tb = NULL;
39 longjmp(env->jmp_env, 1);
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* Clear any pending exception, then re-enter the main loop through
     * the env->jmp_env setjmp point.  @puc is unused here.
     * NOTE(review): braces and the closing #endif restored; the
     * extraction had dropped them. */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
55 /* Execute the code without caching the generated code. An interpreter
56 could be used if available. */
57 static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
58 TranslationBlock *orig_tb)
60 tcg_target_ulong next_tb;
63 /* Should never happen.
64 We only end up here when an existing TB is too long. */
65 if (max_cycles > CF_COUNT_MASK)
66 max_cycles = CF_COUNT_MASK;
68 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
71 /* execute the generated code */
72 next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
73 env->current_tb = NULL;
75 if ((next_tb & 3) == 2) {
76 /* Restore PC. This may happen if async event occurs before
77 the TB starts executing. */
78 cpu_pc_from_tb(env, tb);
80 tb_phys_invalidate(tb, -1);
84 static TranslationBlock *tb_find_slow(CPUArchState *env,
89 TranslationBlock *tb, **ptb1;
91 tb_page_addr_t phys_pc, phys_page1;
92 target_ulong virt_page2;
94 tb_invalidated_flag = 0;
96 /* find translated block using physical mappings */
97 phys_pc = get_page_addr_code(env, pc);
98 phys_page1 = phys_pc & TARGET_PAGE_MASK;
99 h = tb_phys_hash_func(phys_pc);
100 ptb1 = &tb_phys_hash[h];
106 tb->page_addr[0] == phys_page1 &&
107 tb->cs_base == cs_base &&
108 tb->flags == flags) {
109 /* check next page if needed */
110 if (tb->page_addr[1] != -1) {
111 tb_page_addr_t phys_page2;
113 virt_page2 = (pc & TARGET_PAGE_MASK) +
115 phys_page2 = get_page_addr_code(env, virt_page2);
116 if (tb->page_addr[1] == phys_page2)
122 ptb1 = &tb->phys_hash_next;
125 /* if no translated code available, then translate it now */
126 tb = tb_gen_code(env, pc, cs_base, flags, 0);
129 /* Move the last found TB to the head of the list */
131 *ptb1 = tb->phys_hash_next;
132 tb->phys_hash_next = tb_phys_hash[h];
133 tb_phys_hash[h] = tb;
135 /* we add the TB in the virtual pc hash table */
136 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
140 static inline TranslationBlock *tb_find_fast(CPUArchState *env)
142 TranslationBlock *tb;
143 target_ulong cs_base, pc;
146 /* we record a subset of the CPU state. It will
147 always be the same before a given translated block
149 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
150 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
151 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
152 tb->flags != flags)) {
153 tb = tb_find_slow(env, pc, cs_base, flags);
158 static CPUDebugExcpHandler *debug_excp_handler;
160 void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
162 debug_excp_handler = handler;
165 static void cpu_handle_debug_exception(CPUArchState *env)
169 if (!env->watchpoint_hit) {
170 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
171 wp->flags &= ~BP_WATCHPOINT_HIT;
174 if (debug_excp_handler) {
175 debug_excp_handler(env);
/* main execution loop */

/* Global request for the executing CPU to leave cpu_exec(); checked at
 * the top of the loop and mirrored into env->exit_request.
 * volatile sig_atomic_t suggests it is set from signal context --
 * NOTE(review): confirm against the callers that write it. */
volatile sig_atomic_t exit_request;
184 * QEMU emulate can happens because of MMIO or emulation mode, i.e. non-PG mode,
185 * when it's because of MMIO, the MMIO, the interrupt should not be emulated,
186 * because MMIO is emulated for only one instruction now and then back to
189 int need_handle_intr_request(CPUArchState *env)
192 if (!hax_enabled() || hax_vcpu_emulation_mode(env))
193 return env->interrupt_request;
196 return env->interrupt_request;
/*
 * Main TCG execution loop: repeatedly find/translate a TB for the current
 * CPU state and execute it, servicing pending interrupts between blocks,
 * until an exception or exit request unwinds via longjmp; the exception
 * index is captured into 'ret' (see 'ret = env->exception_index').
 *
 * NOTE(review): this copy is a damaged extraction -- the original file's
 * line numbers are fused into each line and many lines (braces, blank
 * lines, some statements) are missing.  The code below is kept
 * byte-for-byte; ONLY comments were added.  Restore from upstream QEMU
 * before attempting to compile.
 */
201 int cpu_exec(CPUArchState *env)
204 CPUState *cpu = ENV_GET_CPU(env);
206 int ret, interrupt_request;
207 TranslationBlock *tb;
209 tcg_target_ulong next_tb;
/* NOTE(review): halted-CPU early-exit branch; its body was lost in the
 * extraction (presumably returns EXCP_HALTED -- confirm upstream). */
212 if (!cpu_has_work(env)) {
219 cpu_single_env = env;
221 if (unlikely(exit_request)) {
222 env->exit_request = 1;
/* Per-target setup: move guest flag state into the TCG-temporary
 * representation each target expects while translated code runs. */
225 #if defined(TARGET_I386)
226 /* put eflags in CPU temporary format */
227 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
228 DF = 1 - (2 * ((env->eflags >> 10) & 1));
229 CC_OP = CC_OP_EFLAGS;
230 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
231 #elif defined(TARGET_SPARC)
232 #elif defined(TARGET_M68K)
233 env->cc_op = CC_OP_FLAGS;
234 env->cc_dest = env->sr & 0xf;
235 env->cc_x = (env->sr >> 4) & 1;
236 #elif defined(TARGET_ALPHA)
237 #elif defined(TARGET_ARM)
238 #elif defined(TARGET_UNICORE32)
239 #elif defined(TARGET_PPC)
240 env->reserve_addr = -1;
241 #elif defined(TARGET_LM32)
242 #elif defined(TARGET_MICROBLAZE)
243 #elif defined(TARGET_MIPS)
244 #elif defined(TARGET_OPENRISC)
245 #elif defined(TARGET_SH4)
246 #elif defined(TARGET_CRIS)
247 #elif defined(TARGET_S390X)
248 #elif defined(TARGET_XTENSA)
251 #error unsupported target CPU
253 env->exception_index = -1;
255 /* prepare setjmp context for exception handling */
/* setjmp target: cpu_loop_exit()/longjmp(env->jmp_env, 1) lands back
 * here with a non-zero return, restarting the outer loop. */
257 if (setjmp(env->jmp_env) == 0) {
258 /* if an exception is pending, we execute it here */
259 if (env->exception_index >= 0) {
260 if (env->exception_index >= EXCP_INTERRUPT) {
261 /* exit request from the cpu execution loop */
262 ret = env->exception_index;
263 if (ret == EXCP_DEBUG) {
264 cpu_handle_debug_exception(env);
268 #if defined(CONFIG_USER_ONLY)
269 /* if user mode only, we simulate a fake exception
270 which will be handled outside the cpu execution
272 #if defined(TARGET_I386)
275 ret = env->exception_index;
279 env->exception_index = -1;
/* HAX acceleration: try to run the vcpu in the kernel module; on
 * failure, unwind back to the setjmp point above. */
285 if (hax_enabled() && !hax_vcpu_exec(env))
286 longjmp(env->jmp_env, 1);
289 next_tb = 0; /* force lookup of first TB */
/* Inner loop: service pending interrupt requests (per target), then
 * find and execute the next translated block. */
291 interrupt_request = env->interrupt_request;
292 if (unlikely(interrupt_request)) {
293 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
294 /* Mask out external interrupts for this step. */
295 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
297 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
298 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
299 env->exception_index = EXCP_DEBUG;
302 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
303 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
304 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
305 if (interrupt_request & CPU_INTERRUPT_HALT) {
306 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
308 env->exception_index = EXCP_HLT;
312 #if defined(TARGET_I386)
313 #if !defined(CONFIG_USER_ONLY)
314 if (interrupt_request & CPU_INTERRUPT_POLL) {
315 env->interrupt_request &= ~CPU_INTERRUPT_POLL;
316 apic_poll_irq(env->apic_state);
319 if (interrupt_request & CPU_INTERRUPT_INIT) {
320 cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
322 do_cpu_init(x86_env_get_cpu(env));
323 env->exception_index = EXCP_HALTED;
325 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
326 do_cpu_sipi(x86_env_get_cpu(env));
328 } else if (env->hflags2 & HF2_GIF_MASK) {
329 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
330 !(env->hflags & HF_SMM_MASK)) {
331 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
333 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
/* HAX-specific: flag the vcpu state for resynchronization. */
336 env->hax_vcpu->resync = 1;
340 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
341 !(env->hflags2 & HF2_NMI_MASK)) {
342 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
343 env->hflags2 |= HF2_NMI_MASK;
344 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
346 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
347 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
348 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
350 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
351 (((env->hflags2 & HF2_VINTR_MASK) &&
352 (env->hflags2 & HF2_HIF_MASK)) ||
353 (!(env->hflags2 & HF2_VINTR_MASK) &&
354 (env->eflags & IF_MASK &&
355 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
357 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
359 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
360 intno = cpu_get_pic_interrupt(env);
361 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
362 do_interrupt_x86_hardirq(env, intno, 1);
363 /* ensure that no TB jump will be modified as
364 the program flow was changed */
366 #if !defined(CONFIG_USER_ONLY)
367 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
368 (env->eflags & IF_MASK) &&
369 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
371 /* FIXME: this should respect TPR */
372 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
374 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
375 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
376 do_interrupt_x86_hardirq(env, intno, 1);
377 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
382 #elif defined(TARGET_PPC)
383 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
386 if (interrupt_request & CPU_INTERRUPT_HARD) {
387 ppc_hw_interrupt(env);
388 if (env->pending_interrupts == 0)
389 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
392 #elif defined(TARGET_LM32)
393 if ((interrupt_request & CPU_INTERRUPT_HARD)
394 && (env->ie & IE_IE)) {
395 env->exception_index = EXCP_IRQ;
399 #elif defined(TARGET_MICROBLAZE)
400 if ((interrupt_request & CPU_INTERRUPT_HARD)
401 && (env->sregs[SR_MSR] & MSR_IE)
402 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
403 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
404 env->exception_index = EXCP_IRQ;
408 #elif defined(TARGET_MIPS)
409 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
410 cpu_mips_hw_interrupts_pending(env)) {
412 env->exception_index = EXCP_EXT_INTERRUPT;
417 #elif defined(TARGET_OPENRISC)
420 if ((interrupt_request & CPU_INTERRUPT_HARD)
421 && (env->sr & SR_IEE)) {
424 if ((interrupt_request & CPU_INTERRUPT_TIMER)
425 && (env->sr & SR_TEE)) {
429 env->exception_index = idx;
434 #elif defined(TARGET_SPARC)
435 if (interrupt_request & CPU_INTERRUPT_HARD) {
436 if (cpu_interrupts_enabled(env) &&
437 env->interrupt_index > 0) {
438 int pil = env->interrupt_index & 0xf;
439 int type = env->interrupt_index & 0xf0;
441 if (((type == TT_EXTINT) &&
442 cpu_pil_allowed(env, pil)) ||
444 env->exception_index = env->interrupt_index;
450 #elif defined(TARGET_ARM)
451 if (interrupt_request & CPU_INTERRUPT_FIQ
452 && !(env->uncached_cpsr & CPSR_F)) {
453 env->exception_index = EXCP_FIQ;
457 /* ARMv7-M interrupt return works by loading a magic value
458 into the PC. On real hardware the load causes the
459 return to occur. The qemu implementation performs the
460 jump normally, then does the exception return when the
461 CPU tries to execute code at the magic address.
462 This will cause the magic PC value to be pushed to
463 the stack if an interrupt occurred at the wrong time.
464 We avoid this by disabling interrupts when
465 pc contains a magic address. */
466 if (interrupt_request & CPU_INTERRUPT_HARD
467 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
468 || !(env->uncached_cpsr & CPSR_I))) {
469 env->exception_index = EXCP_IRQ;
473 #elif defined(TARGET_UNICORE32)
474 if (interrupt_request & CPU_INTERRUPT_HARD
475 && !(env->uncached_asr & ASR_I)) {
476 env->exception_index = UC32_EXCP_INTR;
480 #elif defined(TARGET_SH4)
481 if (interrupt_request & CPU_INTERRUPT_HARD) {
485 #elif defined(TARGET_ALPHA)
488 /* ??? This hard-codes the OSF/1 interrupt levels. */
489 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
491 if (interrupt_request & CPU_INTERRUPT_HARD) {
492 idx = EXCP_DEV_INTERRUPT;
496 if (interrupt_request & CPU_INTERRUPT_TIMER) {
497 idx = EXCP_CLK_INTERRUPT;
501 if (interrupt_request & CPU_INTERRUPT_SMP) {
502 idx = EXCP_SMP_INTERRUPT;
506 if (interrupt_request & CPU_INTERRUPT_MCHK) {
511 env->exception_index = idx;
517 #elif defined(TARGET_CRIS)
518 if (interrupt_request & CPU_INTERRUPT_HARD
519 && (env->pregs[PR_CCS] & I_FLAG)
520 && !env->locked_irq) {
521 env->exception_index = EXCP_IRQ;
525 if (interrupt_request & CPU_INTERRUPT_NMI) {
526 unsigned int m_flag_archval;
527 if (env->pregs[PR_VR] < 32) {
528 m_flag_archval = M_FLAG_V10;
530 m_flag_archval = M_FLAG_V32;
532 if ((env->pregs[PR_CCS] & m_flag_archval)) {
533 env->exception_index = EXCP_NMI;
538 #elif defined(TARGET_M68K)
539 if (interrupt_request & CPU_INTERRUPT_HARD
540 && ((env->sr & SR_I) >> SR_I_SHIFT)
541 < env->pending_level) {
542 /* Real hardware gets the interrupt vector via an
543 IACK cycle at this point. Current emulated
544 hardware doesn't rely on this, so we
545 provide/save the vector when the interrupt is
547 env->exception_index = env->pending_vector;
548 do_interrupt_m68k_hardirq(env);
551 #elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
552 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
553 (env->psw.mask & PSW_MASK_EXT)) {
557 #elif defined(TARGET_XTENSA)
558 if (interrupt_request & CPU_INTERRUPT_HARD) {
559 env->exception_index = EXC_IRQ;
564 /* Don't use the cached interrupt_request value,
565 do_interrupt may have updated the EXITTB flag. */
566 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
567 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
568 /* ensure that no TB jump will be modified as
569 the program flow was changed */
573 if (unlikely(env->exit_request)) {
574 env->exit_request = 0;
575 env->exception_index = EXCP_INTERRUPT;
578 #if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
579 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
580 /* restore flags in standard format */
581 #if defined(TARGET_I386)
582 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
584 log_cpu_state(env, X86_DUMP_CCOP);
585 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
586 #elif defined(TARGET_M68K)
587 cpu_m68k_flush_flags(env, env->cc_op);
588 env->cc_op = CC_OP_FLAGS;
589 env->sr = (env->sr & 0xffe0)
590 | env->cc_dest | (env->cc_x << 4);
591 log_cpu_state(env, 0);
593 log_cpu_state(env, 0);
596 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
/* Look up (or translate) the TB for the current CPU state. */
598 tb = tb_find_fast(env);
599 /* Note: we do it here to avoid a gcc bug on Mac OS X when
600 doing it in tb_find_slow */
601 if (tb_invalidated_flag) {
602 /* as some TB could have been invalidated because
603 of memory exceptions while generating the code, we
604 must recompute the hash index here */
606 tb_invalidated_flag = 0;
608 #ifdef CONFIG_DEBUG_EXEC
609 qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
611 lookup_symbol(tb->pc));
613 /* see if we can patch the calling TB. When the TB
614 spans two pages, we cannot safely do a direct
616 if (next_tb != 0 && tb->page_addr[1] == -1) {
617 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
619 spin_unlock(&tb_lock);
621 /* cpu_interrupt might be called while translating the
622 TB, but before it is linked into a potentially
623 infinite loop and becomes env->current_tb. Avoid
624 starting execution if there is a pending interrupt. */
625 env->current_tb = tb;
627 if (likely(!env->exit_request)) {
629 /* execute the generated code */
/* The low 2 bits of next_tb encode the exit reason; value 2 means
 * the instruction counter (icount) expired mid-block. */
630 next_tb = tcg_qemu_tb_exec(env, tc_ptr);
631 if ((next_tb & 3) == 2) {
632 /* Instruction counter expired. */
634 tb = (TranslationBlock *)(next_tb & ~3);
636 cpu_pc_from_tb(env, tb);
637 insns_left = env->icount_decr.u32;
638 if (env->icount_extra && insns_left >= 0) {
639 /* Refill decrementer and continue execution. */
640 env->icount_extra += insns_left;
641 if (env->icount_extra > 0xffff) {
644 insns_left = env->icount_extra;
646 env->icount_extra -= insns_left;
647 env->icount_decr.u16.low = insns_left;
649 if (insns_left > 0) {
650 /* Execute remaining instructions. */
651 cpu_exec_nocache(env, insns_left, tb);
653 env->exception_index = EXCP_INTERRUPT;
659 env->current_tb = NULL;
/* HAX: leave TCG emulation when the kernel module can resume. */
661 if (hax_enabled() && hax_stop_emulation(env))
665 /* reset soft MMU for next block (it can currently
666 only be set by a memory fault) */
669 /* Reload env after longjmp - the compiler may have smashed all
670 * local variables as longjmp is marked 'noreturn'. */
671 env = cpu_single_env;
/* Loop exit: restore per-target flag state to canonical format. */
676 #if defined(TARGET_I386)
677 /* restore flags in standard format */
678 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
680 #elif defined(TARGET_ARM)
681 /* XXX: Save/restore host fpu exception state?. */
682 #elif defined(TARGET_UNICORE32)
683 #elif defined(TARGET_SPARC)
684 #elif defined(TARGET_PPC)
685 #elif defined(TARGET_LM32)
686 #elif defined(TARGET_M68K)
687 cpu_m68k_flush_flags(env, env->cc_op);
688 env->cc_op = CC_OP_FLAGS;
689 env->sr = (env->sr & 0xffe0)
690 | env->cc_dest | (env->cc_x << 4);
691 #elif defined(TARGET_MICROBLAZE)
692 #elif defined(TARGET_MIPS)
693 #elif defined(TARGET_OPENRISC)
694 #elif defined(TARGET_SH4)
695 #elif defined(TARGET_ALPHA)
696 #elif defined(TARGET_CRIS)
697 #elif defined(TARGET_S390X)
698 #elif defined(TARGET_XTENSA)
701 #error unsupported target CPU
704 /* fail safe : never use cpu_single_env outside cpu_exec() */
705 cpu_single_env = NULL;