/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"
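
/* Unwind to the sigsetjmp() in cpu_exec(); clearing current_tb first
   records that no translated block is executing any more. */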
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */
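
    /* tcg_qemu_tb_exec() returns the address of the TranslationBlock that
     * was last executed, with the reason for the exit encoded in the low
     * TB_EXIT_MASK bits: the index of the chained-jump slot taken
     * (TB_EXIT_IDX0/1) or one of the TB_EXIT_* request codes handled
     * below. */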
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;
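
    /* Generate a throwaway TB whose cycle budget (carried in the cflags
     * argument) is capped at max_cycles; it is run once below, then
     * invalidated and freed, so it never enters the TB caches. */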
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
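    /* Walk the collision chain of this bucket; a TB matches only if its
     * pc, physical page(s), cs_base and flags all match the current CPU
     * state. */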
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
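
/* Fast path: tb_jmp_cache is a direct-mapped cache holding the last TB
   seen for each virtual-PC hash bucket; on a miss, or when the cached TB
   no longer matches the CPU state, fall back to the physical hash table
   via tb_find_slow(). */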
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;
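
    /* If we stopped for a reason other than a watchpoint hit, clear any
     * stale BP_WATCHPOINT_HIT flags left over from earlier stops. */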
    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

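/* Global request for the currently executing vCPU to leave the loop;
   declared sig_atomic_t so it can be set safely from a signal handler or
   another thread. cpu_exec() mirrors it into cpu->exit_request once
   current_cpu is visible. */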
volatile sig_atomic_t exit_request;

/*
 * QEMU falls back to emulation either for an MMIO access or because the
 * vCPU is in a mode the HAX kernel module cannot run (e.g. non-paged
 * mode). In the MMIO case, interrupts must not be injected by the
 * emulator: only the single MMIO instruction is emulated before control
 * returns to the HAX kernel.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    CPUArchState *env = cpu->env_ptr;
    if (!hax_enabled() || hax_vcpu_emulation_mode(env))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
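    /* TCG evaluates x86 condition codes lazily: CC_OP records the last
     * flag-setting operation and CC_SRC its operands, and
     * cpu_cc_compute_all() folds them back into eflags on the exit path
     * at the bottom of this function. */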
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }
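
            /* Try to run the vCPU in the HAX kernel module first. Judging
             * by the control flow, a zero return from hax_vcpu_exec()
             * means no TCG emulation is needed, so we restart the outer
             * loop; a nonzero return (e.g. for MMIO) falls through to the
             * translator loop below. */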
#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(env))
                siglongjmp(cpu->jmp_env, 1);
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
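                            /* Entering SMM rewrites CPU state behind the
                             * HAX kernel module's back, so flag the vCPU
                             * to resynchronize its state copy (the resync
                             * field in this HAX-enabled tree appears to
                             * exist for this purpose). */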
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                env->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
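                            /* Take the interrupt either because the guest
                             * runs under SVM with V_INTR_MASKING and the
                             * host IF copy (HF2_HIF) permits it, or,
                             * without virtual interrupt masking, because
                             * EFLAGS.IF is set and no interrupt shadow
                             * (HF_INHIBIT_IRQ) is pending. */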
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
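                /* The low TB_EXIT_MASK bits of next_tb record which of the
                 * previous TB's two chained-jump slots we exited through;
                 * that slot is what tb_add_jump() patches to point at the
                 * new TB. */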
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
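                    /* The icount budget is kept in two pieces: generated
                     * code decrements the 16-bit icount_decr.u16.low field,
                     * while icount_extra holds whatever does not fit in 16
                     * bits. On underflow we either refill the low field
                     * from icount_extra, or drain the remainder with
                     * cpu_exec_nocache() and exit to process the event. */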
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env))
                    cpu_loop_exit(cpu);
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}