/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
/* Work around ugly bugs in glibc that mangle global register contents */
#undef env
#define env cpu_single_env
#endif
31
32 int tb_invalidated_flag;
33
34 //#define CONFIG_DEBUG_EXEC
35
36 int qemu_cpu_has_work(CPUState *env)
37 {
38     return cpu_has_work(env);
39 }
40
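/* Unwind to the setjmp() point in cpu_exec().  current_tb is cleared
   first so that no stale pointer to the abandoned block survives the
   longjmp. */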
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

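    /* The low two bits of the value returned by tcg_qemu_tb_exec() tag why
       execution stopped; a tag of 2 means the block did not run to
       completion, so the PC has to be restored from the TB itself. */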
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

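/* Look the block up in the physically indexed hash table.  Physical
   addresses are used so that a block is found again even when the same
   guest code is reached through a different virtual mapping. */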
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

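/* Fast path: consult the per-CPU virtual-PC jump cache first, and fall
   back to tb_find_slow() on a miss or when the recorded CPU state does
   not match. */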
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

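/* Called when EXCP_DEBUG is raised.  If no watchpoint actually fired
   (e.g. the exception came from single-stepping), clear any stale
   BP_WATCHPOINT_HIT flags before invoking the registered handler. */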
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

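/* Set asynchronously (e.g. from a signal handler) to ask the CPU loop
   to stop after the current TB; volatile sig_atomic_t keeps the access
   safe from signal context. */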
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
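                    /* next_tb is the address of the TB that ran last, with
                       the low two bits as a tag: 0/1 name the jump slot to
                       patch for direct chaining above, 2 means the
                       instruction counter expired mid-block. */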
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
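                        /* The instruction budget is split: the low 16 bits
                           live in icount_decr.u16.low (counted down by the
                           TBs), the remainder in icount_extra.  A negative
                           insns_left means an asynchronous exit request was
                           signalled through the high half. */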
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}