Refactor some HAX-related code.
[sdk/emulator/qemu.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
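
/*
 * Illustrative note (an assumption, not part of the original change):
 * cpu_resume_from_signal() is typically reached from a host signal
 * handler, e.g. after a write fault on a page holding translated code
 * forced the current TB to be invalidated; clearing exception_index
 * makes the main loop restart with no exception pending.
 */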

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
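
/*
 * Sketch of the return-value encoding used above (derived from the code,
 * not part of the original change): tcg_qemu_tb_exec() packs the address
 * of the last executed TB together with a 2-bit exit condition, e.g.:
 *
 *     TranslationBlock *last = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
 *     unsigned exit_cond = next_tb & TB_EXIT_MASK;
 *
 * where exit_cond is TB_EXIT_IDX0/TB_EXIT_IDX1 for an exit through a
 * goto_tb slot, or TB_EXIT_ICOUNT_EXPIRED/TB_EXIT_REQUESTED otherwise.
 */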

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
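
/*
 * Note (illustration only): the TB generated here is single-shot; it is
 * translated with a cycle budget of max_cycles, executed once, and then
 * invalidated and freed instead of being kept for later lookups.
 */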

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
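
/*
 * Sketch of the slow path above (illustrative, not part of the original
 * change): TBs are hashed by the physical address of their first
 * instruction, so a hit must still match pc, cs_base and flags, and a TB
 * that spans two guest pages is only valid if the second virtual page
 * still resolves to the same physical page it was translated from.
 */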

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
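
/*
 * Illustrative note: tb_jmp_cache is the first-level, direct-mapped
 * lookup keyed by a hash of the virtual PC; tb_find_slow() is only taken
 * on a miss or when cs_base/flags disagree, and it refills the cache
 * entry on return.
 */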

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

/*
 * QEMU emulation can happen because of MMIO or because the vCPU is in
 * emulation mode (i.e. non-PG mode). In the MMIO case the interrupt
 * should not be emulated, because MMIO is emulated for only one
 * instruction at a time before control returns to the HAX kernel.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    CPUArchState *env = cpu->env_ptr;
    if (!hax_enabled() || hax_vcpu_emulation_mode(env)) {
        return cpu->interrupt_request;
    }
    return 0;
#else
    return cpu->interrupt_request;
#endif
}
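
/*
 * Behaviour sketch for the helper above (assuming the semantics described
 * in its comment):
 *
 *     HAX disabled                     -> TCG services interrupt_request
 *     HAX enabled, emulation mode      -> TCG services interrupt_request
 *     HAX enabled, one-shot MMIO emul. -> returns 0; interrupts are left
 *                                         for the HAX kernel to inject
 */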

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    /* As long as cpu_single_env is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the cpu_single_env
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }
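
    /*
     * Illustration (an assumption about the requesting side, which is not
     * shown in this file): the writer is expected to perform the mirror
     * sequence of the code above, i.e. set exit_request, issue a barrier,
     * then test cpu_single_env, so that at least one of the two threads
     * observes the other's store.
     */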

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
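
    /*
     * Note (illustrative): on i386 the flags are kept in a lazy format
     * (CC_SRC/CC_OP) while translated code runs; the transformation above
     * is undone at the bottom of cpu_exec(), e.g.:
     *
     *     env->eflags |= cpu_cc_compute_all(env, CC_OP) | (DF & DF_MASK);
     */
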
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
            /* jmp_env is set up with sigsetjmp() above, so unwind with
               siglongjmp() as cpu_loop_exit() does */
            if (hax_enabled() && !hax_vcpu_exec(env)) {
                siglongjmp(env->jmp_env, 1);
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled()) {
                                env->hax_vcpu->resync = 1;
                            }
#endif
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
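                /*
                 * Illustrative note: tb_add_jump() patches the direct-jump
                 * slot of the previously executed TB (next_tb & TB_EXIT_MASK
                 * selects goto_tb slot 0 or 1) so that it branches straight
                 * into the new TB, skipping this lookup on later runs.
                 */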
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
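                    /*
                     * Illustrative note: the icount budget is split between
                     * a 16-bit decrementer (icount_decr.u16.low) consumed by
                     * translated code and icount_extra holding the rest; a
                     * refill, as above, moves at most 0xffff instructions
                     * back into the decrementer.
                     */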
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env)) {
                    cpu_loop_exit(env);
                }
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}