/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
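
/* cpu_loop_exit() below unwinds straight back to the setjmp() point in
   cpu_exec(); current_tb is cleared first so the main loop knows that no
   translation block is still in flight. */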
void cpu_loop_exit(CPUState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;
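
    /* tcg_qemu_tb_exec() returns the address of the last executed TB with a
       2-bit tag in the low bits; tag value 2 indicates the TB was exited
       before its first instruction ran (see the icount handling in
       cpu_exec()), in which case the PC must be rewound from the TB. */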
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
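
/* TB lookup is two-level: tb_find_fast() first probes tb_jmp_cache, a
   direct-mapped cache indexed by a hash of the virtual PC; only on a miss
   (or a cs_base/flags mismatch) does it fall back to tb_find_slow() above,
   which walks the physically indexed hash chain and translates the code if
   no TB exists yet. */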
static inline TranslationBlock *tb_find_fast(CPUState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
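        /* No watchpoint triggered this debug exception (a breakpoint or
           single-step did), so clear any stale BP_WATCHPOINT_HIT flags left
           over from earlier hits before they can be reported again. */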
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */
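
/* exit_request is set asynchronously (e.g. from a signal handler or from
   another thread) to ask the CPU loop to stop; the volatile sig_atomic_t
   type keeps those accesses async-signal-safe. */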
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
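    /* From here on the arithmetic flags live in the lazy CC_SRC/CC_OP
       representation and DF is kept as +1/-1; they are folded back into
       env->eflags in standard format when cpu_exec() returns below. */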
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (setjmp(env->jmp_env) == 0) {
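            /* Anything inside the loop that needs to abandon the current TB
               calls cpu_loop_exit(), which longjmp()s back to the setjmp()
               above; the else branch at the bottom then reloads 'env' and
               the outer for (;;) retries. */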
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
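                    /* What follows is the per-target interrupt dispatch:
                       each architecture decides whether a pending
                       interrupt_request bit may be delivered now, raises
                       the corresponding exception, and resets next_tb so
                       that no stale TB-to-TB jump gets patched after the
                       control-flow change. */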
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
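                /* (tb_add_jump() rewrites the direct-jump slot selected by
                   next_tb & 3 in the previous TB so it branches straight to
                   this TB's generated code, skipping the hash lookup on
                   later runs; next_tb == 0 means there is no predecessor
                   to patch.) */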
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
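                    /* Tag 2 in next_tb's low bits means the icount
                       decrementer expired: env->icount_decr.u16.low counts
                       down as TBs run, and once the 16-bit budget (refilled
                       below from icount_extra, capped at 0xffff) is
                       exhausted, execution stops so timers can fire
                       deterministically. */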
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}