/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

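    /* tcg_qemu_tb_exec() returns the address of the last TB that ran, with
     * the exit reason encoded in its low TB_EXIT_MASK bits; the checks
     * below decode those bits to decide how the guest state must be
     * fixed up. */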
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

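/* Look a TB up in the physically indexed hash table, matching pc, cs_base,
   flags and, for blocks that span two pages, the second physical page as
   well; if nothing matches, translate a new block with tb_gen_code(). */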
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

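/* Fast path: consult the virtual-pc indexed tb_jmp_cache first and fall
   back to tb_find_slow() on a miss or a stale entry. */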
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

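/* Optional hook run from cpu_handle_debug_exception(); installed with
   cpu_set_debug_excp_handler(). */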
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

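/* Clear stale watchpoint hit flags and give the registered debug exception
   handler a chance to run before EXCP_DEBUG is returned to the caller. */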
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

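/* Return the mask of pending interrupt requests that the main loop must
   service before executing the next TB. */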
static int need_handle_intr_request(CPUState *cpu)
{
    return cpu->interrupt_request;
}

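/* Main guest execution loop: look up (or translate) the TB for the current
   guest state, execute it, and handle exceptions and interrupt requests in
   between, until an EXCP_* exit condition is returned to the caller. */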
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
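            /* Inner loop: service pending interrupt requests, then find and
               execute the next TB, chaining TBs together where possible. */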
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
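                    /* Decode the exit reason from the low bits of next_tb
                     * (see cpu_tb_exec() above). */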
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

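    /* The loop above only exits via 'break' with 'ret' holding an EXCP_*
       value; restore any guest state kept in a temporary format before
       returning. */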
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}