/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
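    /* tcg_qemu_tb_exec() hands back the address of the last TB that ran,
     * with an exit-reason code packed into the low TB_EXIT_MASK bits;
     * the checks below unpack both parts. */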
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

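/* TB lookup is two-level: tb_find_fast() below first consults the per-CPU
 * tb_jmp_cache, indexed by the guest virtual PC; on a miss, tb_find_slow()
 * walks the physical-PC hash chain and, if no translation exists yet,
 * generates one with tb_gen_code(). */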
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code is available, translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* add the TB to the virtual PC hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

/*
 * QEMU emulation can be entered either for MMIO or because the vCPU is in
 * emulation mode (i.e. non-PG mode). In the MMIO case the interrupt should
 * not be emulated, because MMIO is emulated for only one instruction before
 * control goes back to the HAX kernel.
 */
static int need_handle_intr_request(CPUState *cpu)
{
#ifdef CONFIG_HAX
    CPUArchState *env = cpu->env_ptr;
    if (!hax_enabled() || hax_vcpu_emulation_mode(env))
        return cpu->interrupt_request;
    return 0;
#else
    return cpu->interrupt_request;
#endif
}


int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
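    /* From here on the guest state is held in TCG's working format (for
     * i386, the lazy CC_SRC/CC_OP condition codes set up above); it is
     * converted back to the architectural format in the epilogue at the
     * end of this function. */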
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(env))
                cpu_loop_exit(cpu);
#endif

            next_tb = 0; /* force lookup of first TB */
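            /* next_tb also tracks TB chaining: when non-zero it holds the
             * previously executed TB (with its exit slot in the low bits)
             * so the tb_add_jump() call below can patch that TB to jump
             * straight to the one found next. */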
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_cpu);
                            cpu->exception_index = EXCP_HALTED;
                            cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_HAX
                            if (hax_enabled())
                                env->hax_vcpu->resync = 1;
#endif
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
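                /* The low TB_EXIT_MASK bits of next_tb select which of the
                 * previous TB's two jump slots tb_add_jump() patches. */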
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
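                        /* icount_decr.u16.low holds the per-TB instruction
                         * budget and icount_extra whatever did not fit into
                         * that 16-bit decrementer; refill from the latter
                         * while it lasts, otherwise wind down. */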
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env))
                    cpu_loop_exit(cpu);
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host FPU exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}