/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);
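    /* tcg_qemu_tb_exec() returns the address of the last TranslationBlock
     * that was executed, with the low TB_EXIT_MASK bits holding the reason
     * the generated code gave control back; the pointer is only meaningful
     * once those bits have been masked off again.
     */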
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
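    /* The only caller in this file is the icount-expired path in cpu_exec():
     * when the instruction budget runs out in the middle of a TB, the
     * remaining instructions are retranslated into a throw-away TB of at
     * most max_cycles instructions so execution stops on the budget boundary.
     */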
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
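    /* The lookup is two-level: tb_jmp_cache is a small direct-mapped cache
     * indexed by the virtual PC, and on a miss tb_find_slow() searches the
     * physically-indexed hash table (translating the block if it has never
     * been seen before).
     */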
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
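
/* Invoked from cpu_exec() when EXCP_DEBUG is about to be returned: clear any
 * sticky watchpoint-hit flags so that single-stepping can make progress, then
 * call the handler registered via cpu_set_debug_excp_handler(), if any.
 */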
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

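/* Set asynchronously (e.g. from a signal handler or another thread) to
 * request that the executing vCPU leave cpu_exec(); it is sampled below,
 * after current_cpu has been published, and mirrored into cpu->exit_request.
 */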
volatile sig_atomic_t exit_request;

/*
 * QEMU falls back to emulation either for MMIO accesses or because the vCPU
 * is in a mode that HAX cannot run directly (i.e. non-paged mode).  In the
 * MMIO case the interrupt must not be injected here: only a single
 * instruction is emulated before control goes back to the HAX kernel module,
 * which will deliver the interrupt itself.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    CPUArchState *env = cpu->env_ptr;
    if (!hax_enabled() || hax_vcpu_emulation_mode(env))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_rmb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }
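
            /* Hand the vCPU to the HAX kernel module when hardware
             * acceleration is in use: presumably hax_vcpu_exec() returns
             * zero once guest execution should resume from the top of the
             * outer loop, and nonzero when the vCPU still needs TCG
             * emulation (MMIO, non-paged mode), in which case we fall
             * through to the TB execution loop below.
             */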
#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(env))
                longjmp(env->jmp_env, 1);
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
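                /* One iteration per TB: check for pending interrupts and
                 * exit requests, look up (or translate) the next TB, chain
                 * it to the previous one when possible, and run it.
                 */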
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                env->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(x86_env_get_cpu(env));
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TINT;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(cpu, 0);
#else
                    log_cpu_state(cpu, 0);
#endif
                }
#endif /* DEBUG_DISAS */
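                /* Look up (or translate) the TB matching the current CPU
                 * state and, when possible, chain the previously executed TB
                 * directly to it, all under tb_lock.
                 */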
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
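                    /* next_tb still holds the TB we just left together with
                     * the index of the jump slot we exited through in its low
                     * bits, which is exactly what tb_add_jump() needs to
                     * patch that exit so it branches straight to the new TB.
                     */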
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
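                        /* The instruction budget is kept in two parts:
                         * icount_decr.u16.low is the counter the generated
                         * code decrements (at most 0xffff at a time), while
                         * icount_extra banks whatever is left over.  Either
                         * refill the decrementer from the banked amount, or,
                         * if nothing is banked, run whatever remains without
                         * caching and leave the loop with EXCP_INTERRUPT.
                         */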
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env))
                    cpu_loop_exit(env);
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}