/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "hax.h"
#include "qemu-barrier.h"
#include "qtest.h"

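/* Set when translated blocks may have been invalidated (e.g. by a
   translation-buffer flush) while new code was being generated;
   cpu_exec() checks it to force a fresh TB lookup instead of patching
   a possibly stale direct jump. */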
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}

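/* Unwind back to the setjmp() point at the top of cpu_exec(); used by
   helpers and exception code to abandon the current translated block. */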
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

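    /* tcg_qemu_tb_exec() returns a pointer to the last executed TB with
       status encoded in the low two bits: by the convention also used by
       tb_add_jump() in cpu_exec(), 0/1 name a direct-jump slot, while 2
       means the instruction counter expired before the block body ran. */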
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

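/* Slow-path TB lookup: hash the physical PC into tb_phys_hash and walk
   the collision chain.  Keying on the physical address lets different
   virtual mappings of the same code share a single translation. */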
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

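/* Fast-path TB lookup: a direct-mapped cache indexed by the virtual PC.
   On a miss, or if cs_base/flags do not match, fall back to
   tb_find_slow(). */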
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

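/* If the pending debug exception did not stop on a watchpoint, clear
   any stale per-watchpoint hit flags, then give the target-specific
   handler (if one was registered via cpu_set_debug_excp_handler) a
   chance to process the exception. */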
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

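/* Set asynchronously (e.g. from a signal handler or another thread) to
   ask the CPU to leave its execution loop; latched into
   env->exit_request at the top of cpu_exec(). */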
volatile sig_atomic_t exit_request;

/*
 * QEMU emulation can happen because of MMIO or because the vcpu is in
 * emulation mode (i.e. non-PG mode). When it is because of MMIO, the
 * interrupt must not be emulated here, since MMIO is emulated for only
 * one instruction at a time before control returns to the HAX kernel.
 */
int need_handle_intr_request(CPUArchState *env)
{
#ifdef CONFIG_HAX
    if (!hax_enabled() || hax_vcpu_emulation_mode(env))
        return env->interrupt_request;
    return 0;
#else
    return env->interrupt_request;
#endif
}

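/* Main CPU execution loop: the outer for(;;)/setjmp pair catches
   exceptions raised from translated code, while the inner loop finds,
   chains and runs translated blocks until an interrupt or exit request
   breaks out. */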
int cpu_exec(CPUArchState *env)
{
#ifdef TARGET_PPC
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

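    /* For i386, eflags is kept in a decomposed "CPU temporary" form while
       translated code runs: the arithmetic flags live in CC_SRC/CC_OP for
       lazy evaluation and the direction flag in DF, so they are masked
       out of env->eflags here and folded back in before returning. */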
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(env))
                longjmp(env->jmp_env, 1);
#endif

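            /* next_tb remembers the previously executed TB (plus a
               jump-slot index in its low bits) so the new TB can be
               chained to it; zero disables chaining and forces a plain
               lookup. */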
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_env_get_cpu(env));
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                env->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
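                /* tb_lock serializes access to the TB hash tables and
                   direct-jump patching; in practice it only matters for
                   multi-threaded user-mode emulation. */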
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
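                /* (next_tb & ~3) is the TB that just ran and (next_tb & 3)
                   selects which of its two direct-jump slots should be
                   patched to point at the new TB. */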
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
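                        /* The icount budget for a slice lives in the
                           16-bit decrementer icount_decr.u16.low, with any
                           overflow parked in icount_extra: refill the
                           decrementer while instructions remain, otherwise
                           run the stragglers without caching and exit to
                           handle the pending event. */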
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env))
                    cpu_loop_exit(env);
#endif

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}