/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
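
/* Return-value encoding used by cpu_tb_exec() below: the generated code
 * hands back the address of the last TranslationBlock it ran, with the
 * low bits (TB_EXIT_MASK) holding the exit condition, e.g.
 * TB_EXIT_REQUESTED or TB_EXIT_ICOUNT_EXPIRED.  A value of 0 means
 * "no previous TB" and disables direct block chaining in the main loop.
 */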
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
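
/* Note: the temporary TB generated by cpu_exec_nocache() is run exactly
 * once and then invalidated and freed, so truncated blocks never linger
 * in the translation caches.  This is what lets the icount machinery
 * stop after a precise number of guest instructions.
 */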
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
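
/* Lookup strategy: tb_find_fast() below first probes tb_jmp_cache, a
 * small direct-mapped cache indexed by the virtual PC, so the common
 * case costs a single compare.  Only on a miss (or after the cache has
 * been flushed, e.g. by a TLB flush) does it fall back to the
 * physical-hash walk in tb_find_slow() above.
 */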
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;
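
/* exit_request is the process-global "stop executing TBs" flag.  It is
 * declared volatile sig_atomic_t because it may be written from a signal
 * handler or another thread while cpu_exec() polls it; inside the loop
 * it is mirrored into the per-CPU cpu->exit_request.
 */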
/*
 * QEMU emulation can happen because of MMIO, or because we are in
 * emulation mode (i.e. non-PG mode).  When it is because of MMIO, the
 * interrupt should not be emulated, because MMIO is emulated for only
 * one instruction at a time before control goes back to the HAX kernel
 * module.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    CPUArchState *env = cpu->env_ptr;
    if (!hax_enabled() || hax_vcpu_emulation_mode(env))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    /* As long as cpu_single_env is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the cpu_single_env
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(env))
                longjmp(env->jmp_env, 1);
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
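                    /* What follows is the per-target interrupt dispatch:
                     * each architecture checks its own enable bits, calls
                     * its do_interrupt hook, and clears next_tb so that
                     * the interrupted TB is not chained to the next one.
                     */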
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                env->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
                            intno = ldl_phys(env->vm_vmcb
                                             + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
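                /* Block chaining note: tb_add_jump() patches the direct
                 * jump slot (index next_tb & TB_EXIT_MASK) of the TB we
                 * just left so that it branches straight into tb's code,
                 * letting future iterations skip this lookup entirely.
                 */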
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.
                               At most 0xffff instructions fit in the u16
                               decrementer; the rest stays in icount_extra. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env))
                    cpu_loop_exit(env);
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}