2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
38 int tb_invalidated_flag;
41 //#define DEBUG_SIGNAL
43 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K) || \
45 /* XXX: unify with i386 target */
46 void cpu_loop_exit(void)
48 longjmp(env->jmp_env, 1);
51 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
55 /* exit the current TB from a signal handler. The host registers are
56 restored in a state compatible with the CPU emulator
58 void cpu_resume_from_signal(CPUState *env1, void *puc)
60 #if !defined(CONFIG_SOFTMMU)
61 struct ucontext *uc = puc;
66 /* XXX: restore cpu registers saved in host registers */
68 #if !defined(CONFIG_SOFTMMU)
70 /* XXX: use siglongjmp ? */
71 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
74 longjmp(env->jmp_env, 1);
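/* Editorial note (not original code): the exception protocol used above is a
   plain setjmp()/longjmp() pair. cpu_exec() does setjmp(env->jmp_env) once per
   outer iteration; any code that must abort the current translated block
   records the reason and jumps back, e.g.:

       env->exception_index = EXCP_DEBUG;   // reason for leaving the TB
       cpu_loop_exit();                     // longjmp()s back into cpu_exec()

   The value returned by setjmp() only re-enters the loop; the actual cause is
   read from env->exception_index afterwards. */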
78 static TranslationBlock *tb_find_slow(target_ulong pc,
82 TranslationBlock *tb, **ptb1;
85 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
90 tb_invalidated_flag = 0;
92 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
94 /* find translated block using physical mappings */
95 phys_pc = get_phys_addr_code(env, pc);
96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
98 h = tb_phys_hash_func(phys_pc);
99 ptb1 = &tb_phys_hash[h];
105 tb->page_addr[0] == phys_page1 &&
106 tb->cs_base == cs_base &&
107 tb->flags == flags) {
108 /* check next page if needed */
109 if (tb->page_addr[1] != -1) {
110 virt_page2 = (pc & TARGET_PAGE_MASK) +
112 phys_page2 = get_phys_addr_code(env, virt_page2);
113 if (tb->page_addr[1] == phys_page2)
119 ptb1 = &tb->phys_hash_next;
122 /* if no translated code available, then translate it now */
125 /* flush must be done */
127 /* cannot fail at this point */
129 /* don't forget to invalidate previous TB info */
130 tb_invalidated_flag = 1;
132 tc_ptr = code_gen_ptr;
134 tb->cs_base = cs_base;
136 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
137 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
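    /* Editorial note: the expression above rounds code_gen_ptr up to the next
       CODE_GEN_ALIGN boundary, i.e. aligned = (p + size + ALIGN - 1) & ~(ALIGN - 1),
       so each translated block starts at an aligned host address. */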
139 /* check next page if needed */
140 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
142 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
143 phys_page2 = get_phys_addr_code(env, virt_page2);
145 tb_link_phys(tb, phys_pc, phys_page2);
148 /* we add the TB in the virtual pc hash table */
149 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
150 spin_unlock(&tb_lock);
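/* Editorial note: tb_find_slow() keys its lookup on *physical* addresses so
   that the same guest page mapped at several virtual addresses shares one
   translation, and so that invalidation on guest code writes can be done per
   physical page. A block that crosses a page boundary records both pages in
   tb->page_addr[0]/[1], and both must still match before a cached TB is
   reused. */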
154 static inline TranslationBlock *tb_find_fast(void)
156 TranslationBlock *tb;
157 target_ulong cs_base, pc;
160 /* we record a subset of the CPU state. It will
161 always be the same before a given translated block is executed. */
163 #if defined(TARGET_I386)
165 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
166 cs_base = env->segs[R_CS].base;
167 pc = cs_base + env->eip;
168 #elif defined(TARGET_ARM)
169 flags = env->thumb | (env->vfp.vec_len << 1)
170 | (env->vfp.vec_stride << 4);
171 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
173 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
177 #elif defined(TARGET_SPARC)
178 #ifdef TARGET_SPARC64
179 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
180 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
181 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
183 // FPU enable . MMU enabled . MMU no-fault . Supervisor
184 flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
189 #elif defined(TARGET_PPC)
190 flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
191 (msr_se << MSR_SE) | (msr_le << MSR_LE);
194 #elif defined(TARGET_MIPS)
195 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
198 #elif defined(TARGET_M68K)
199 flags = (env->fpcr & M68K_FPCR_PREC) | (env->sr & SR_S);
202 #elif defined(TARGET_SH4)
203 flags = env->sr & (SR_MD | SR_RB);
204 cs_base = 0; /* XXXXX */
206 #elif defined(TARGET_ALPHA)
211 #error unsupported CPU
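    /* Editorial note: (pc, cs_base, flags) is the complete lookup key for a
       translation. Two CPU states that agree on these three values must decode
       to identical code, so each target folds exactly the mode bits that
       change instruction decoding (CPU mode, FPU enables, MMU state, ...) into
       'flags' above. */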
213 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
214 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
215 tb->flags != flags, 0)) {
216 tb = tb_find_slow(pc, cs_base, flags);
217 /* Note: we do it here to avoid a gcc bug on Mac OS X when
218 doing it in tb_find_slow */
219 if (tb_invalidated_flag) {
220 /* as some TB could have been invalidated because
221 of memory exceptions while generating the code, we
222 must recompute the hash index here */
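    /* Editorial note: when tb_invalidated_flag is set, code generation may
       have flushed or invalidated other blocks, so any TB remembered by the
       main loop for direct chaining can be stale; the loop therefore falls
       back to a fresh lookup instead of patching a jump into the previously
       executed block. */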
230 /* main execution loop */
232 int cpu_exec(CPUState *env1)
234 #define DECLARE_HOST_REGS 1
235 #include "hostregs_helper.h"
236 #if defined(TARGET_SPARC)
237 #if defined(reg_REGWPTR)
238 uint32_t *saved_regwptr;
241 #if defined(__sparc__) && !defined(HOST_SOLARIS)
245 int ret, interrupt_request;
246 void (*gen_func)(void);
247 TranslationBlock *tb;
250 #if defined(TARGET_I386)
251 /* handle exit of HALTED state */
252 if (env1->hflags & HF_HALTED_MASK) {
253 /* disable halt condition */
254 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
255 (env1->eflags & IF_MASK)) {
256 env1->hflags &= ~HF_HALTED_MASK;
261 #elif defined(TARGET_PPC)
263 if (env1->msr[MSR_EE] &&
264 (env1->interrupt_request & CPU_INTERRUPT_HARD)) {
270 #elif defined(TARGET_SPARC)
272 if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
273 (env1->psret != 0)) {
279 #elif defined(TARGET_ARM)
281 /* An interrupt wakes the CPU even if the I and F CPSR bits are
282 set. We use EXITTB to silently wake CPU without causing an interrupt. */
284 if (env1->interrupt_request &
285 (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB)) {
291 #elif defined(TARGET_MIPS)
293 if (env1->interrupt_request &
294 (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
300 #elif defined(TARGET_ALPHA) || defined(TARGET_M68K)
302 if (env1->interrupt_request & CPU_INTERRUPT_HARD) {
310 cpu_single_env = env1;
312 /* first we save global registers */
313 #define SAVE_HOST_REGS 1
314 #include "hostregs_helper.h"
316 #if defined(__sparc__) && !defined(HOST_SOLARIS)
317 /* we also save i7 because longjmp may not restore it */
318 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
321 #if defined(TARGET_I386)
323 /* put eflags in CPU temporary format */
324 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
325 DF = 1 - (2 * ((env->eflags >> 10) & 1));
326 CC_OP = CC_OP_EFLAGS;
327 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
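    /* Editorial note: x86 condition codes are kept lazily while inside the
       loop. env->eflags holds only the bits not covered by the lazy scheme,
       the arithmetic flags live in CC_SRC under CC_OP_EFLAGS, and DF is kept
       as +1/-1. The architectural value is rebuilt on demand, as done before
       dumping state or leaving the loop:

           eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
    */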
328 #elif defined(TARGET_ARM)
329 #elif defined(TARGET_SPARC)
330 #if defined(reg_REGWPTR)
331 saved_regwptr = REGWPTR;
333 #elif defined(TARGET_PPC)
334 #elif defined(TARGET_M68K)
335 env->cc_op = CC_OP_FLAGS;
336 env->cc_dest = env->sr & 0xf;
337 env->cc_x = (env->sr >> 4) & 1;
338 #elif defined(TARGET_MIPS)
339 #elif defined(TARGET_SH4)
341 #elif defined(TARGET_ALPHA)
344 #error unsupported target CPU
346 env->exception_index = -1;
348 /* prepare setjmp context for exception handling */
350 if (setjmp(env->jmp_env) == 0) {
351 env->current_tb = NULL;
352 /* if an exception is pending, we execute it here */
353 if (env->exception_index >= 0) {
354 if (env->exception_index >= EXCP_INTERRUPT) {
355 /* exit request from the cpu execution loop */
356 ret = env->exception_index;
358 } else if (env->user_mode_only) {
359 /* if user mode only, we simulate a fake exception
360 which will be handled outside the cpu execution loop */
362 #if defined(TARGET_I386)
363 do_interrupt_user(env->exception_index,
364 env->exception_is_int,
366 env->exception_next_eip);
368 ret = env->exception_index;
371 #if defined(TARGET_I386)
372 /* simulate a real cpu exception. On i386, it can
373 trigger new exceptions, but we do not handle
374 double or triple faults yet. */
375 do_interrupt(env->exception_index,
376 env->exception_is_int,
378 env->exception_next_eip, 0);
379 /* successfully delivered */
380 env->old_exception = -1;
381 #elif defined(TARGET_PPC)
383 #elif defined(TARGET_MIPS)
385 #elif defined(TARGET_SPARC)
386 do_interrupt(env->exception_index);
387 #elif defined(TARGET_ARM)
389 #elif defined(TARGET_SH4)
391 #elif defined(TARGET_ALPHA)
393 #elif defined(TARGET_M68K)
397 env->exception_index = -1;
400 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
402 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
403 ret = kqemu_cpu_exec(env);
404 /* put eflags in CPU temporary format */
405 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
406 DF = 1 - (2 * ((env->eflags >> 10) & 1));
407 CC_OP = CC_OP_EFLAGS;
408 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
411 longjmp(env->jmp_env, 1);
412 } else if (ret == 2) {
413 /* softmmu execution needed */
415 if (env->interrupt_request != 0) {
416 /* hardware interrupt will be executed just after */
418 /* otherwise, we restart */
419 longjmp(env->jmp_env, 1);
425 T0 = 0; /* force lookup of first TB */
427 #if defined(__sparc__) && !defined(HOST_SOLARIS)
428 /* g1 can be modified by some libc? functions */
431 interrupt_request = env->interrupt_request;
432 if (__builtin_expect(interrupt_request, 0)) {
433 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
434 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
435 env->exception_index = EXCP_DEBUG;
438 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
439 defined(TARGET_PPC) || defined(TARGET_ALPHA)
440 if (interrupt_request & CPU_INTERRUPT_HALT) {
441 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
443 env->exception_index = EXCP_HLT;
447 #if defined(TARGET_I386)
448 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
449 !(env->hflags & HF_SMM_MASK)) {
450 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
452 #if defined(__sparc__) && !defined(HOST_SOLARIS)
457 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
458 (env->eflags & IF_MASK) &&
459 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
461 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
462 intno = cpu_get_pic_interrupt(env);
463 if (loglevel & CPU_LOG_TB_IN_ASM) {
464 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
466 do_interrupt(intno, 0, 0, 0, 1);
467 /* ensure that no TB jump will be modified as
468 the program flow was changed */
469 #if defined(__sparc__) && !defined(HOST_SOLARIS)
475 #elif defined(TARGET_PPC)
477 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
481 if (interrupt_request & CPU_INTERRUPT_HARD) {
482 ppc_hw_interrupt(env);
483 if (env->pending_interrupts == 0)
484 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
485 #if defined(__sparc__) && !defined(HOST_SOLARIS)
491 #elif defined(TARGET_MIPS)
492 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
493 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
494 (env->CP0_Status & (1 << CP0St_IE)) &&
495 !(env->CP0_Status & (1 << CP0St_EXL)) &&
496 !(env->CP0_Status & (1 << CP0St_ERL)) &&
497 !(env->hflags & MIPS_HFLAG_DM)) {
499 env->exception_index = EXCP_EXT_INTERRUPT;
502 #if defined(__sparc__) && !defined(HOST_SOLARIS)
508 #elif defined(TARGET_SPARC)
509 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
511 int pil = env->interrupt_index & 15;
512 int type = env->interrupt_index & 0xf0;
514 if (((type == TT_EXTINT) &&
515 (pil == 15 || pil > env->psrpil)) ||
517 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
518 do_interrupt(env->interrupt_index);
519 env->interrupt_index = 0;
520 #if defined(__sparc__) && !defined(HOST_SOLARIS)
526 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
527 //do_interrupt(0, 0, 0, 0, 0);
528 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
530 #elif defined(TARGET_ARM)
531 if (interrupt_request & CPU_INTERRUPT_FIQ
532 && !(env->uncached_cpsr & CPSR_F)) {
533 env->exception_index = EXCP_FIQ;
536 if (interrupt_request & CPU_INTERRUPT_HARD
537 && !(env->uncached_cpsr & CPSR_I)) {
538 env->exception_index = EXCP_IRQ;
541 #elif defined(TARGET_SH4)
543 #elif defined(TARGET_ALPHA)
544 if (interrupt_request & CPU_INTERRUPT_HARD) {
547 #elif defined(TARGET_M68K)
548 if (interrupt_request & CPU_INTERRUPT_HARD
549 && ((env->sr & SR_I) >> SR_I_SHIFT)
550 < env->pending_level) {
551 /* Real hardware gets the interrupt vector via an
552 IACK cycle at this point. Current emulated
553 hardware doesn't rely on this, so we
554 provide/save the vector when the interrupt is first signalled. */
556 env->exception_index = env->pending_vector;
560 /* Don't use the cached interrupt_request value,
561 do_interrupt may have updated the EXITTB flag. */
562 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
563 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
564 /* ensure that no TB jump will be modified as
565 the program flow was changed */
566 #if defined(__sparc__) && !defined(HOST_SOLARIS)
572 if (interrupt_request & CPU_INTERRUPT_EXIT) {
573 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
574 env->exception_index = EXCP_INTERRUPT;
579 if ((loglevel & CPU_LOG_TB_CPU)) {
580 #if defined(TARGET_I386)
581 /* restore flags in standard format */
583 env->regs[R_EAX] = EAX;
586 env->regs[R_EBX] = EBX;
589 env->regs[R_ECX] = ECX;
592 env->regs[R_EDX] = EDX;
595 env->regs[R_ESI] = ESI;
598 env->regs[R_EDI] = EDI;
601 env->regs[R_EBP] = EBP;
604 env->regs[R_ESP] = ESP;
606 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
607 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
608 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
609 #elif defined(TARGET_ARM)
610 cpu_dump_state(env, logfile, fprintf, 0);
611 #elif defined(TARGET_SPARC)
612 REGWPTR = env->regbase + (env->cwp * 16);
613 env->regwptr = REGWPTR;
614 cpu_dump_state(env, logfile, fprintf, 0);
615 #elif defined(TARGET_PPC)
616 cpu_dump_state(env, logfile, fprintf, 0);
617 #elif defined(TARGET_M68K)
618 cpu_m68k_flush_flags(env, env->cc_op);
619 env->cc_op = CC_OP_FLAGS;
620 env->sr = (env->sr & 0xffe0)
621 | env->cc_dest | (env->cc_x << 4);
622 cpu_dump_state(env, logfile, fprintf, 0);
623 #elif defined(TARGET_MIPS)
624 cpu_dump_state(env, logfile, fprintf, 0);
625 #elif defined(TARGET_SH4)
626 cpu_dump_state(env, logfile, fprintf, 0);
627 #elif defined(TARGET_ALPHA)
628 cpu_dump_state(env, logfile, fprintf, 0);
630 #error unsupported target CPU
636 if ((loglevel & CPU_LOG_EXEC)) {
637 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
638 (long)tb->tc_ptr, tb->pc,
639 lookup_symbol(tb->pc));
642 #if defined(__sparc__) && !defined(HOST_SOLARIS)
645 /* see if we can patch the calling TB. When the TB
646 spans two pages, we cannot safely do a direct jump. */
651 (env->kqemu_enabled != 2) &&
653 tb->page_addr[1] == -1
654 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
655 && (tb->cflags & CF_CODE_COPY) ==
656 (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
660 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
661 #if defined(USE_CODE_COPY)
662 /* propagates the FP use info */
663 ((TranslationBlock *)(T0 & ~3))->cflags |=
664 (tb->cflags & CF_FP_USED);
666 spin_unlock(&tb_lock);
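            /* Editorial note: T0 carries the address of the block we are
               coming from, with the jump slot number (0 or 1) in its low bits;
               tb_add_jump() patches that slot so the two blocks are linked and
               the next transition skips the lookup above. Whenever control
               flow changes behind the translator's back (hardware interrupts,
               EXITTB requests), T0 is reset to 0 so that no stale block gets
               patched. */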
670 env->current_tb = tb;
671 /* execute the generated code */
672 gen_func = (void *)tc_ptr;
673 #if defined(__sparc__)
674 __asm__ __volatile__("call %0\n\t"
678 : "i0", "i1", "i2", "i3", "i4", "i5",
679 "o0", "o1", "o2", "o3", "o4", "o5",
680 "l0", "l1", "l2", "l3", "l4", "l5",
682 #elif defined(__arm__)
683 asm volatile ("mov pc, %0\n\t"
684 ".global exec_loop\n\t"
688 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
689 #elif defined(TARGET_I386) && defined(USE_CODE_COPY)
691 if (!(tb->cflags & CF_CODE_COPY)) {
692 if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
693 save_native_fp_state(env);
697 if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
698 restore_native_fp_state(env);
700 /* we work with native eflags */
701 CC_SRC = cc_table[CC_OP].compute_all();
702 CC_OP = CC_OP_EFLAGS;
703 asm(".globl exec_loop\n"
708 " fs movl %11, %%eax\n"
709 " andl $0x400, %%eax\n"
710 " fs orl %8, %%eax\n"
713 " fs movl %%esp, %12\n"
714 " fs movl %0, %%eax\n"
715 " fs movl %1, %%ecx\n"
716 " fs movl %2, %%edx\n"
717 " fs movl %3, %%ebx\n"
718 " fs movl %4, %%esp\n"
719 " fs movl %5, %%ebp\n"
720 " fs movl %6, %%esi\n"
721 " fs movl %7, %%edi\n"
724 " fs movl %%esp, %4\n"
725 " fs movl %12, %%esp\n"
726 " fs movl %%eax, %0\n"
727 " fs movl %%ecx, %1\n"
728 " fs movl %%edx, %2\n"
729 " fs movl %%ebx, %3\n"
730 " fs movl %%ebp, %5\n"
731 " fs movl %%esi, %6\n"
732 " fs movl %%edi, %7\n"
735 " movl %%eax, %%ecx\n"
736 " andl $0x400, %%ecx\n"
738 " andl $0x8d5, %%eax\n"
739 " fs movl %%eax, %8\n"
741 " subl %%ecx, %%eax\n"
742 " fs movl %%eax, %11\n"
743 " fs movl %9, %%ebx\n" /* get T0 value */
746 : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
747 "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
748 "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
749 "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
750 "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
751 "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
752 "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
753 "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
754 "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
755 "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
757 "m" (*(uint8_t *)offsetof(CPUState, df)),
758 "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
763 #elif defined(__ia64)
770 fp.gp = code_gen_buffer + 2 * (1 << 20);
771 (*(void (*)(void)) &fp)();
775 env->current_tb = NULL;
776 /* reset soft MMU for next block (it can currently
777 only be set by a memory fault) */
778 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
779 if (env->hflags & HF_SOFTMMU_MASK) {
780 env->hflags &= ~HF_SOFTMMU_MASK;
781 /* do not allow linking to another block */
785 #if defined(USE_KQEMU)
786 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
787 if (kqemu_is_ok(env) &&
788 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
799 #if defined(TARGET_I386)
800 #if defined(USE_CODE_COPY)
801 if (env->native_fp_regs) {
802 save_native_fp_state(env);
805 /* restore flags in standard format */
806 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
807 #elif defined(TARGET_ARM)
808 /* XXX: Save/restore host fpu exception state? */
809 #elif defined(TARGET_SPARC)
810 #if defined(reg_REGWPTR)
811 REGWPTR = saved_regwptr;
813 #elif defined(TARGET_PPC)
814 #elif defined(TARGET_M68K)
815 cpu_m68k_flush_flags(env, env->cc_op);
816 env->cc_op = CC_OP_FLAGS;
817 env->sr = (env->sr & 0xffe0)
818 | env->cc_dest | (env->cc_x << 4);
819 #elif defined(TARGET_MIPS)
820 #elif defined(TARGET_SH4)
821 #elif defined(TARGET_ALPHA)
824 #error unsupported target CPU
827 /* restore global registers */
828 #if defined(__sparc__) && !defined(HOST_SOLARIS)
829 asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
831 #include "hostregs_helper.h"
833 /* fail safe: never use cpu_single_env outside cpu_exec() */
834 cpu_single_env = NULL;
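/* Editorial sketch (simplified, not original code): a host driver loop calls
   cpu_exec() repeatedly -- run until the guest stops or needs service, handle
   the request, then iterate. Roughly:

       for (;;) {
           ret = cpu_exec(env);
           if (ret == EXCP_DEBUG) {
               ... hand control to the debugger (breakpoint / single step) ...
           } else if (ret == EXCP_HLT || ret == EXCP_HALTED) {
               ... guest is idle: wait for the next interrupt or timer ...
           }
           ... process host events, timers and pending I/O ...
       }

   The real drivers live in vl.c (system emulation) and the per-target
   user-mode main loops; the structure above is illustrative only. */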
838 /* must only be called from the generated code as an exception can be generated */
840 void tb_invalidate_page_range(target_ulong start, target_ulong end)
842 /* XXX: cannot enable it yet because it yields to MMU exception
843 where NIP != read address on PowerPC */
845 target_ulong phys_addr;
846 phys_addr = get_phys_addr_code(env, start);
847 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
851 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
853 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
855 CPUX86State *saved_env;
859 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
861 cpu_x86_load_seg_cache(env, seg_reg, selector,
862 (selector << 4), 0xffff, 0);
864 load_seg(seg_reg, selector);
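/* Editorial note: in real mode or vm86 mode a selector just supplies the
   segment base (selector << 4) with a 64K limit, so the load above cannot
   fault; in protected mode the full load_seg() path is taken and may raise
   guest exceptions. */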
869 void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
871 CPUX86State *saved_env;
876 helper_fsave((target_ulong)ptr, data32);
881 void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
883 CPUX86State *saved_env;
888 helper_frstor((target_ulong)ptr, data32);
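/* Editorial note: these two wrappers exist for user-mode code outside the CPU
   loop (typically building or restoring the FPU image of a guest signal
   frame); they temporarily switch the global 'env' to the given CPUX86State so
   that helper_fsave()/helper_frstor() can run outside translated code. */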
893 #endif /* TARGET_I386 */
895 #if !defined(CONFIG_SOFTMMU)
897 #if defined(TARGET_I386)
899 /* 'pc' is the host PC at which the exception was raised. 'address' is
900 the effective address of the memory exception. 'is_write' is 1 if a
901 write caused the exception and 0 otherwise. 'old_set' is the
902 signal set which should be restored */
903 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
904 int is_write, sigset_t *old_set,
907 TranslationBlock *tb;
911 env = cpu_single_env; /* XXX: find a correct solution for multithread */
912 #if defined(DEBUG_SIGNAL)
913 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
914 pc, address, is_write, *(unsigned long *)old_set);
916 /* XXX: locking issue */
917 if (is_write && page_unprotect(h2g(address), pc, puc)) {
921 /* see if it is an MMU fault */
922 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
923 ((env->hflags & HF_CPL_MASK) == 3), 0);
925 return 0; /* not an MMU fault */
927 return 1; /* the MMU fault was handled without causing real CPU fault */
928 /* now we have a real cpu fault */
931 /* the PC is inside the translated code. It means that we have
932 a virtual CPU fault */
933 cpu_restore_state(tb, env, pc, puc);
937 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
938 env->eip, env->cr[2], env->error_code);
940 /* we restore the process signal mask as the sigreturn should
941 do it (XXX: use sigsetjmp) */
942 sigprocmask(SIG_SETMASK, old_set, NULL);
943 raise_exception_err(env->exception_index, env->error_code);
945 /* activate soft MMU for this block */
946 env->hflags |= HF_SOFTMMU_MASK;
947 cpu_resume_from_signal(env, puc);
949 /* never comes here */
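/* Editorial note: this return convention is shared by every
   handle_cpu_signal() variant below: 1 means the fault was fixed up internally
   (write-protected code page or guest MMU fill) and the faulting host
   instruction can simply be restarted; 0 means the fault is not one the
   emulator can resolve and the caller must treat it as a real error; and when
   a guest exception has to be delivered the function does not return at all,
   because raise_exception_err()/cpu_resume_from_signal() longjmp back into the
   CPU loop. */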
953 #elif defined(TARGET_ARM)
954 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
955 int is_write, sigset_t *old_set,
958 TranslationBlock *tb;
962 env = cpu_single_env; /* XXX: find a correct solution for multithread */
963 #if defined(DEBUG_SIGNAL)
964 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
965 pc, address, is_write, *(unsigned long *)old_set);
967 /* XXX: locking issue */
968 if (is_write && page_unprotect(h2g(address), pc, puc)) {
971 /* see if it is an MMU fault */
972 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
974 return 0; /* not an MMU fault */
976 return 1; /* the MMU fault was handled without causing real CPU fault */
977 /* now we have a real cpu fault */
980 /* the PC is inside the translated code. It means that we have
981 a virtual CPU fault */
982 cpu_restore_state(tb, env, pc, puc);
984 /* we restore the process signal mask as the sigreturn should
985 do it (XXX: use sigsetjmp) */
986 sigprocmask(SIG_SETMASK, old_set, NULL);
989 #elif defined(TARGET_SPARC)
990 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
991 int is_write, sigset_t *old_set,
994 TranslationBlock *tb;
998 env = cpu_single_env; /* XXX: find a correct solution for multithread */
999 #if defined(DEBUG_SIGNAL)
1000 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1001 pc, address, is_write, *(unsigned long *)old_set);
1003 /* XXX: locking issue */
1004 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1007 /* see if it is an MMU fault */
1008 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1010 return 0; /* not an MMU fault */
1012 return 1; /* the MMU fault was handled without causing real CPU fault */
1013 /* now we have a real cpu fault */
1014 tb = tb_find_pc(pc);
1016 /* the PC is inside the translated code. It means that we have
1017 a virtual CPU fault */
1018 cpu_restore_state(tb, env, pc, puc);
1020 /* we restore the process signal mask as the sigreturn should
1021 do it (XXX: use sigsetjmp) */
1022 sigprocmask(SIG_SETMASK, old_set, NULL);
1025 #elif defined (TARGET_PPC)
1026 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1027 int is_write, sigset_t *old_set,
1030 TranslationBlock *tb;
1034 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1035 #if defined(DEBUG_SIGNAL)
1036 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1037 pc, address, is_write, *(unsigned long *)old_set);
1039 /* XXX: locking issue */
1040 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1044 /* see if it is an MMU fault */
1045 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1047 return 0; /* not an MMU fault */
1049 return 1; /* the MMU fault was handled without causing real CPU fault */
1051 /* now we have a real cpu fault */
1052 tb = tb_find_pc(pc);
1054 /* the PC is inside the translated code. It means that we have
1055 a virtual CPU fault */
1056 cpu_restore_state(tb, env, pc, puc);
1060 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1061 env->nip, env->error_code, tb);
1063 /* we restore the process signal mask as the sigreturn should
1064 do it (XXX: use sigsetjmp) */
1065 sigprocmask(SIG_SETMASK, old_set, NULL);
1066 do_raise_exception_err(env->exception_index, env->error_code);
1068 /* activate soft MMU for this block */
1069 cpu_resume_from_signal(env, puc);
1071 /* never comes here */
1075 #elif defined(TARGET_M68K)
1076 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1077 int is_write, sigset_t *old_set,
1080 TranslationBlock *tb;
1084 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1085 #if defined(DEBUG_SIGNAL)
1086 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1087 pc, address, is_write, *(unsigned long *)old_set);
1089 /* XXX: locking issue */
1090 if (is_write && page_unprotect(address, pc, puc)) {
1093 /* see if it is an MMU fault */
1094 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1096 return 0; /* not an MMU fault */
1098 return 1; /* the MMU fault was handled without causing real CPU fault */
1099 /* now we have a real cpu fault */
1100 tb = tb_find_pc(pc);
1102 /* the PC is inside the translated code. It means that we have
1103 a virtual CPU fault */
1104 cpu_restore_state(tb, env, pc, puc);
1106 /* we restore the process signal mask as the sigreturn should
1107 do it (XXX: use sigsetjmp) */
1108 sigprocmask(SIG_SETMASK, old_set, NULL);
1110 /* never comes here */
1114 #elif defined (TARGET_MIPS)
1115 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1116 int is_write, sigset_t *old_set,
1119 TranslationBlock *tb;
1123 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1124 #if defined(DEBUG_SIGNAL)
1125 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1126 pc, address, is_write, *(unsigned long *)old_set);
1128 /* XXX: locking issue */
1129 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1133 /* see if it is an MMU fault */
1134 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1136 return 0; /* not an MMU fault */
1138 return 1; /* the MMU fault was handled without causing real CPU fault */
1140 /* now we have a real cpu fault */
1141 tb = tb_find_pc(pc);
1143 /* the PC is inside the translated code. It means that we have
1144 a virtual CPU fault */
1145 cpu_restore_state(tb, env, pc, puc);
1149 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1150 env->PC, env->error_code, tb);
1152 /* we restore the process signal mask as the sigreturn should
1153 do it (XXX: use sigsetjmp) */
1154 sigprocmask(SIG_SETMASK, old_set, NULL);
1155 do_raise_exception_err(env->exception_index, env->error_code);
1157 /* activate soft MMU for this block */
1158 cpu_resume_from_signal(env, puc);
1160 /* never comes here */
1164 #elif defined (TARGET_SH4)
1165 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1166 int is_write, sigset_t *old_set,
1169 TranslationBlock *tb;
1173 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1174 #if defined(DEBUG_SIGNAL)
1175 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1176 pc, address, is_write, *(unsigned long *)old_set);
1178 /* XXX: locking issue */
1179 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1183 /* see if it is an MMU fault */
1184 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1186 return 0; /* not an MMU fault */
1188 return 1; /* the MMU fault was handled without causing real CPU fault */
1190 /* now we have a real cpu fault */
1191 tb = tb_find_pc(pc);
1193 /* the PC is inside the translated code. It means that we have
1194 a virtual CPU fault */
1195 cpu_restore_state(tb, env, pc, puc);
1198 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1199 env->nip, env->error_code, tb);
1201 /* we restore the process signal mask as the sigreturn should
1202 do it (XXX: use sigsetjmp) */
1203 sigprocmask(SIG_SETMASK, old_set, NULL);
1205 /* never comes here */
1209 #elif defined (TARGET_ALPHA)
1210 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1211 int is_write, sigset_t *old_set,
1214 TranslationBlock *tb;
1218 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1219 #if defined(DEBUG_SIGNAL)
1220 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1221 pc, address, is_write, *(unsigned long *)old_set);
1223 /* XXX: locking issue */
1224 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1228 /* see if it is an MMU fault */
1229 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
1231 return 0; /* not an MMU fault */
1233 return 1; /* the MMU fault was handled without causing real CPU fault */
1235 /* now we have a real cpu fault */
1236 tb = tb_find_pc(pc);
1238 /* the PC is inside the translated code. It means that we have
1239 a virtual CPU fault */
1240 cpu_restore_state(tb, env, pc, puc);
1243 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1244 env->nip, env->error_code, tb);
1246 /* we restore the process signal mask as the sigreturn should
1247 do it (XXX: use sigsetjmp) */
1248 sigprocmask(SIG_SETMASK, old_set, NULL);
1250 /* never comes here */
1254 #error unsupported target CPU
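/* Editorial sketch: the host-side handlers below are not installed in this
   file. They are expected to be registered by the host-specific startup code
   for SIGSEGV/SIGBUS with SA_SIGINFO, roughly as follows (the wrapper name is
   an assumption, shown for illustration only):

       struct sigaction act;
       sigfillset(&act.sa_mask);
       act.sa_flags = SA_SIGINFO;
       act.sa_sigaction = host_segv_handler;  // wrapper that calls cpu_signal_handler()
       sigaction(SIGSEGV, &act, NULL);
       sigaction(SIGBUS, &act, NULL);
*/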
1257 #if defined(__i386__)
1259 #if defined(__APPLE__)
1260 # include <sys/ucontext.h>
1262 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1263 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1264 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1266 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1267 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1268 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1271 #if defined(USE_CODE_COPY)
1272 static void cpu_send_trap(unsigned long pc, int trap,
1273 struct ucontext *uc)
1275 TranslationBlock *tb;
1278 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1279 /* now we have a real cpu fault */
1280 tb = tb_find_pc(pc);
1282 /* the PC is inside the translated code. It means that we have
1283 a virtual CPU fault */
1284 cpu_restore_state(tb, env, pc, uc);
1286 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
1287 raise_exception_err(trap, env->error_code);
1291 int cpu_signal_handler(int host_signum, void *pinfo,
1294 siginfo_t *info = pinfo;
1295 struct ucontext *uc = puc;
1303 #define REG_TRAPNO TRAPNO
1306 trapno = TRAP_sig(uc);
1307 #if defined(TARGET_I386) && defined(USE_CODE_COPY)
1308 if (trapno == 0x00 || trapno == 0x05) {
1309 /* send division by zero or bound exception */
1310 cpu_send_trap(pc, trapno, uc);
1314 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1316 (ERROR_sig(uc) >> 1) & 1 : 0,
1317 &uc->uc_sigmask, puc);
1320 #elif defined(__x86_64__)
1322 int cpu_signal_handler(int host_signum, void *pinfo,
1325 siginfo_t *info = pinfo;
1326 struct ucontext *uc = puc;
1329 pc = uc->uc_mcontext.gregs[REG_RIP];
1330 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1331 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1332 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1333 &uc->uc_sigmask, puc);
1336 #elif defined(__powerpc__)
1338 /***********************************************************************
1339 * signal context platform-specific definitions
1343 /* All Registers access - only for local access */
1344 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1345 /* Gpr Registers access */
1346 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1347 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1348 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1349 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1350 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1351 # define LR_sig(context) REG_sig(link, context) /* Link register */
1352 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1353 /* Float Registers access */
1354 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1355 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1356 /* Exception Registers access */
1357 # define DAR_sig(context) REG_sig(dar, context)
1358 # define DSISR_sig(context) REG_sig(dsisr, context)
1359 # define TRAP_sig(context) REG_sig(trap, context)
1363 # include <sys/ucontext.h>
1364 typedef struct ucontext SIGCONTEXT;
1365 /* All Registers access - only for local access */
1366 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1367 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1368 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1369 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1370 /* Gpr Registers access */
1371 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1372 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1373 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1374 # define CTR_sig(context) REG_sig(ctr, context)
1375 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1376 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1377 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1378 /* Float Registers access */
1379 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1380 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1381 /* Exception Registers access */
1382 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1383 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1384 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1385 #endif /* __APPLE__ */
1387 int cpu_signal_handler(int host_signum, void *pinfo,
1390 siginfo_t *info = pinfo;
1391 struct ucontext *uc = puc;
1399 if (DSISR_sig(uc) & 0x00800000)
1402 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1405 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1406 is_write, &uc->uc_sigmask, puc);
1409 #elif defined(__alpha__)
1411 int cpu_signal_handler(int host_signum, void *pinfo,
1414 siginfo_t *info = pinfo;
1415 struct ucontext *uc = puc;
1416 uint32_t *pc = uc->uc_mcontext.sc_pc;
1417 uint32_t insn = *pc;
1420 /* XXX: need kernel patch to get write flag faster */
1421 switch (insn >> 26) {
1436 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1437 is_write, &uc->uc_sigmask, puc);
1439 #elif defined(__sparc__)
1441 int cpu_signal_handler(int host_signum, void *pinfo,
1444 siginfo_t *info = pinfo;
1445 uint32_t *regs = (uint32_t *)(info + 1);
1446 void *sigmask = (regs + 20);
1451 /* XXX: is there a standard glibc define ? */
1453 /* XXX: need kernel patch to get write flag faster */
1455 insn = *(uint32_t *)pc;
1456 if ((insn >> 30) == 3) {
1457 switch((insn >> 19) & 0x3f) {
1469 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1470 is_write, sigmask, NULL);
1473 #elif defined(__arm__)
1475 int cpu_signal_handler(int host_signum, void *pinfo,
1478 siginfo_t *info = pinfo;
1479 struct ucontext *uc = puc;
1483 pc = uc->uc_mcontext.gregs[R15];
1484 /* XXX: compute is_write */
1486 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1488 &uc->uc_sigmask, puc);
1491 #elif defined(__mc68000)
1493 int cpu_signal_handler(int host_signum, void *pinfo,
1496 siginfo_t *info = pinfo;
1497 struct ucontext *uc = puc;
1501 pc = uc->uc_mcontext.gregs[16];
1502 /* XXX: compute is_write */
1504 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1506 &uc->uc_sigmask, puc);
1509 #elif defined(__ia64)
1512 /* This ought to be in <bits/siginfo.h>... */
1513 # define __ISR_VALID 1
1516 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1518 siginfo_t *info = pinfo;
1519 struct ucontext *uc = puc;
1523 ip = uc->uc_mcontext.sc_ip;
1524 switch (host_signum) {
1530 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1531 /* ISR.W (write-access) is bit 33: */
1532 is_write = (info->si_isr >> 33) & 1;
1538 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1540 &uc->uc_sigmask, puc);
1543 #elif defined(__s390__)
1545 int cpu_signal_handler(int host_signum, void *pinfo,
1548 siginfo_t *info = pinfo;
1549 struct ucontext *uc = puc;
1553 pc = uc->uc_mcontext.psw.addr;
1554 /* XXX: compute is_write */
1556 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1557 is_write, &uc->uc_sigmask, puc);
1560 #elif defined(__mips__)
1562 int cpu_signal_handler(int host_signum, void *pinfo,
1565 siginfo_t *info = pinfo;
1566 struct ucontext *uc = puc;
1567 greg_t pc = uc->uc_mcontext.pc;
1570 /* XXX: compute is_write */
1572 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1573 is_write, &uc->uc_sigmask, puc);
1578 #error host CPU specific signal handler needed
1582 #endif /* !defined(CONFIG_SOFTMMU) */