[sdk/emulator/qemu.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

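/* Unwind back to the sigsetjmp() in cpu_exec().  Any pending exception is
   left in env->exception_index for the outer loop to act on. */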
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

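/* The value returned by tcg_qemu_tb_exec() is the last TB that was (or was
   about to be) executed, with the exit reason encoded in its low bits
   (TB_EXIT_MASK); cpu_tb_exec() decodes that value below. */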
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

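/* Slow path of TB lookup: search the physical-PC hash chain, translating a
   new TB if nothing matches, then refresh the per-virtual-PC jump cache so
   tb_find_fast() can hit next time. */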
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

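/* Called when the main loop exits with EXCP_DEBUG: clear stale
   watchpoint-hit flags and forward the event to the registered debug
   exception handler, if any. */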
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

/*
 * With HAX, QEMU falls back to emulation either for MMIO or because the
 * vCPU is in emulation mode (i.e. non-paged mode).  In the MMIO case the
 * interrupt should not be emulated here, because only a single instruction
 * is emulated before control goes back to the HAX kernel.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    CPUArchState *env = cpu->env_ptr;
    if (!hax_enabled() || hax_vcpu_emulation_mode(env))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}

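/* Outer execution loop for one vCPU: service pending exceptions and
   interrupts, look up (or translate) the next TB, chain it to the previous
   one when possible, and execute it, until something forces a return to the
   caller (halt, debug, exit request, ...). */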
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(env))
                siglongjmp(env->jmp_env, 1);
#endif

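            /* Inner loop: find or translate the next TB, try to chain it to
               the previous one, and execute it, until an exception or exit
               request breaks out through cpu_loop_exit(). */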
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                env->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(x86_env_get_cpu(env));
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(cpu, 0);
#else
                    log_cpu_state(cpu, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env))
                    cpu_loop_exit(env);
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}