hax: clean up
[sdk/emulator/qemu.git] / cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
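    /* The low TB_EXIT_MASK bits of next_tb encode why the generated code
     * returned: 0 or 1 mean we left through goto_tb jump slot 0/1, while
     * larger values mean the TB stopped early (an exit was requested or the
     * instruction-count budget ran out); the remaining bits point at the TB
     * that was executing when we came back. */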
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
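    /* TBs are keyed by the physical address of their first instruction, so a
     * block translated under one virtual mapping can be reused by any alias
     * of the same physical page. */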
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
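    /* tb_jmp_cache is a small direct-mapped cache indexed by a hash of the
     * virtual PC; on a miss or a stale entry we fall back to the slower
     * lookup in the physical hash table. */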
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

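/* Set by other threads to ask the vCPU currently running in cpu_exec() to
   leave the execution loop; see the memory barrier discussion in cpu_exec()
   below. */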
volatile sig_atomic_t exit_request;

/*
 * With HAX, QEMU falls back to TCG emulation either to handle MMIO or because
 * the vCPU is in a mode that HAX cannot run (e.g. non-paged mode).  In the
 * MMIO case the interrupt must not be emulated here: only a single
 * instruction is emulated before control goes back to the HAX kernel.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    if (!hax_enabled() || hax_vcpu_emulation_mode(cpu))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}


int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
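    /* cpu_loop_exit() siglongjmps back to this point, so exceptions raised
       from helpers or generated code restart the loop here with
       cpu->exception_index already set. */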
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
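            /* With HAX enabled, hand the vCPU to the kernel module first.
             * A zero return from hax_vcpu_exec() means no TCG emulation is
             * needed, so jump back to the top of the outer loop instead of
             * executing translated blocks. */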
            if (hax_enabled() && !hax_vcpu_exec(cpu))
                siglongjmp(cpu->jmp_env, 1);
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_cpu);
                            cpu->exception_index = EXCP_HALTED;
                            cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                cpu->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
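                /* The low TB_EXIT_MASK bits of next_tb select which of the
                   two goto_tb jump slots in the previous TB gets patched to
                   point at the block we are about to run. */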
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
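                        /* Generated code decrements icount_decr.u16.low once
                         * per guest instruction; icount_extra holds the part
                         * of the budget that did not fit in that 16-bit
                         * counter.  Either refill and keep going, or emulate
                         * the trailing instructions without caching. */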
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(cpu))
                    cpu_loop_exit(cpu);
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}