/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
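
/* Return-value convention used below: tcg_qemu_tb_exec() hands back either 0
 * (nothing to chain from) or the address of the TranslationBlock that was
 * last executing, with the low TB_EXIT_MASK bits encoding why control came
 * back (TB_EXIT_IDX0/IDX1 for an unchained goto_tb slot, TB_EXIT_REQUESTED or
 * TB_EXIT_ICOUNT_EXPIRED for forced exits).  Callers therefore mask with
 * ~TB_EXIT_MASK before treating the value as a TB pointer.
 */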
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
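
/* Translated blocks are keyed by the *physical* address of the guest code
 * (together with cs_base and the CPU flags), so a translation made for one
 * virtual mapping of a code page can be reused for any other mapping of the
 * same page.  tb_find_slow() below walks the physical-PC hash chain and, if
 * nothing matches, asks tb_gen_code() for a fresh translation.
 */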
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
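
/* Fast path: tb_jmp_cache is a small direct-mapped cache indexed by a hash
 * of the current virtual PC.  Only on a miss (or when cs_base/flags do not
 * match) do we fall back to the physical-hash lookup in tb_find_slow().
 */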
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

/*
 * QEMU falls back to emulation either because of an MMIO access or because
 * the vCPU is in a mode HAX cannot run (i.e. non-PG mode).  When the reason
 * is MMIO, the interrupt should not be emulated here, because MMIO is
 * emulated for only one instruction before control goes back to the HAX
 * kernel module.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    if (!hax_enabled() || hax_vcpu_emulation_mode(cpu))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}
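
/* cpu_exec() runs translated code for one virtual CPU until something forces
 * it back out: it returns an EXCP_* value (e.g. EXCP_INTERRUPT, EXCP_HLT,
 * EXCP_DEBUG or EXCP_HALTED) that the calling loop uses to decide what to do
 * next (service the request, halt the CPU, enter the debugger, ...).
 */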
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;
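
    /* The rest of the function is a pair of nested loops wrapped around a
     * sigsetjmp().  Code that needs to abort execution of the current TB
     * (guest exceptions, cpu_loop_exit(), cpu_resume_from_signal()) does a
     * siglongjmp() back to this point; the else branch further down then
     * reloads the locals the compiler may have clobbered across the longjmp
     * and the outer loop starts over.
     */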
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            if (hax_enabled() && !hax_vcpu_exec(cpu))
                longjmp(cpu->jmp_env, 1);

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
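                /* Each pass first services pending interrupt work: an
                 * interrupt is either turned into a guest exception or
                 * delivered via cc->do_interrupt(), and next_tb is reset to
                 * 0 so the TB we were about to chain from is not patched to
                 * jump into the old control flow.
                 */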
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                cpu->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
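
                /* Find (or translate) the next TB and, while tb_lock is
                 * still held, try to chain the previous TB directly to it
                 * with tb_add_jump() so future executions can jump from one
                 * TB to the next without coming back through this loop.
                 */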
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;

                if (hax_enabled() && hax_stop_emulation(cpu))
                    cpu_loop_exit(cpu);

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */
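
    /* Undo the lazy-flags setup done on entry: recombine the temporary
     * condition-code state (and, on x86, the direction flag) back into the
     * architectural flags register before returning to the caller.
     */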
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}