/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "sysemu/hax.h"

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

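    /* The value returned by the generated code is the pointer of the last
       executed TB, with the exit condition encoded in its low bits
       (TB_EXIT_MASK): which jump slot was taken, or why execution stopped. */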
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
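    /* max_cycles goes into the cflags argument; its low CF_COUNT_MASK bits
       bound how many guest instructions the one-shot TB may contain. */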
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
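    /* TBs are hashed by the guest-physical address of the code, so one
       translation is shared by every virtual mapping of the same page and
       can be invalidated when the physical page is modified. */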
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
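    /* tb_jmp_cache is a direct-mapped lookup table indexed by a hash of the
       virtual PC; a hit must still match pc, cs_base and flags before use. */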
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

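/* Set by other threads (or from signal context) to ask the executing vCPU
   to leave the loop below, hence the volatile sig_atomic_t type. */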
volatile sig_atomic_t exit_request;

static int need_handle_intr_request(CPUState *cpu)
{
    return cpu->interrupt_request;
}

int cpu_exec(CPUArchState *env)
{
#ifdef CONFIG_HAX
    assert(0);
#else
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
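    /* CC_SRC/CC_OP carry the lazily evaluated condition codes, and df is
       kept as +1/-1 so string instructions can add it to an index directly. */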
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
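        /* The outer loop is re-entered via siglongjmp() whenever
           cpu_loop_exit() is called: sigsetjmp() then returns nonzero and
           we fall into the else branch below to reload the local state. */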
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
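            /* next_tb == 0 also prevents tb_add_jump() below from chaining
               a stale previous TB to whatever we execute next. */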
            for(;;) {
                interrupt_request = need_handle_intr_request(cpu);
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
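                /* The low bits of next_tb say which of the previous TB's
                   two jump slots we left through; that is the slot that
                   gets patched to point at the new TB. */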
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
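                        /* The generated code decrements the 16-bit budget
                           in icount_decr.u16.low; icount_extra banks
                           whatever did not fit in those 16 bits. */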
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail-safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
#endif /* CONFIG_HAX */
}