x86: use caller supplied CPUState for interrupt related stuff
[sdk/emulator/qemu.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
/* Work around ugly bugs in glibc that mangle global register contents */
#undef env
#define env cpu_single_env
#endif

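/* Set during code generation when existing TBs may have been invalidated
   (e.g. by a code-cache flush); the execution loop then avoids chaining
   to a possibly stale TB. */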
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

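/* Unwind the host stack back to the setjmp() in cpu_exec(), discarding
   the TB that was executing. */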
void cpu_loop_exit(CPUState *env1)
{
    env1->current_tb = NULL;
    longjmp(env1->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.  */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

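    /* The low two bits of next_tb tag how the TB exited: 0/1 name the
       jump slot taken, 2 means the TB was abandoned before completing
       (icount expired or an async event fired). */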
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

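/* Fast-path TB lookup: first probe the per-CPU virtual-PC hash
   (tb_jmp_cache); on a miss, fall back to the physical hash, and
   ultimately to translation, in tb_find_slow(). */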
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

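/* Optional hook invoked when an EXCP_DEBUG exception is handled; targets
   register it via cpu_set_debug_excp_handler() (e.g. to service hardware
   breakpoints). */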
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

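/* Set from signal or other-thread context to kick the CPU out of the
   execution loop; sig_atomic_t so that a single write is safe from a
   signal handler. */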
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
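    /* DF is stored as +1/-1 so string instructions can add it directly
       to their index registers. */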
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    do_interrupt(env);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    /* m68k's do_interrupt() still takes an is_hw flag
                       rather than the CPU state. */
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

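            /* next_tb caches the host address of the TB that just ran
               (with the jump-slot tag in its low bits) so a direct jump
               to the next TB can be patched in; it is reset to 0 whenever
               control flow changes in a way that must not be chained. */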
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
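                        /* HF2_GIF_MASK is SVM's global interrupt flag;
                           while it is clear, no SMI/NMI/IRQ is taken. */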
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
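                /* tb_lock serializes TB lookup/translation and the
                   direct-jump patching done by tb_add_jump() below. */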
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
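                        /* The icount decrementer is only 16 bits wide;
                           budgets above 0xffff are parked in icount_extra
                           and moved into the decrementer in slices. */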
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}