/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
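
/* tb_invalidated_flag is set by tb_find_slow() whenever code generation
   had to flush the whole TB cache; tb_find_fast() tests it so that any
   stale chaining state (T0) is dropped before linking to the new TB. */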
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env; saved_t0 = T0;                         \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env; T0 = saved_t0;                 \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
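
/* Note: on hosts other than the buggy-glibc sparc case above, the
   SAVE_GLOBALS()/RESTORE_GLOBALS() pair stays empty.  The wrappers are
   needed here because the emulator keeps state such as env and T0 in
   global host registers, which the affected glibc setjmp/longjmp do not
   preserve (rationale inferred from the workaround itself). */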
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved here by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif
    env = env1;
    /* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);
    tb_invalidated_flag = 0;
    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
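    /* The expression above is the usual round-up-to-alignment idiom: with
       CODE_GEN_ALIGN a power of two, (p + size + ALIGN - 1) & ~(ALIGN - 1)
       advances the generation pointer to the next aligned slot, e.g. with
       ALIGN = 16, p = 0x1000 and size = 0x13 it yields 0x1020. */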

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
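
/* Illustrative sketch only (hypothetical helper, not used anywhere): the
   lookup above is a direct-mapped cache indexed by a hash of the guest PC,
   so a hit costs one load plus three key comparisons, and any mismatch
   falls back to the hash-table walk and retranslation in tb_find_slow().
   Kept under "#if 0" because it merely restates the logic. */
#if 0
static TranslationBlock *example_tb_lookup(target_ulong pc,
                                           target_ulong cs_base,
                                           unsigned int flags)
{
    TranslationBlock *tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];

    if (tb && tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
        return tb;                            /* fast-path hit */
    return tb_find_slow(pc, cs_base, flags);  /* miss */
}
#endif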
#define BREAK_CHAIN T0 = 0
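
/* Chaining state: while executing, T0 holds the address of the TB we came
   from with the jump slot index in its two low bits (see the T0 & ~3 and
   T0 & 3 uses in cpu_exec() below), so clearing T0 both forgets the caller
   and prevents its jump from being patched. */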
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
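    /* Note: DF is kept internally as +1/-1 (the increment direction for
       string operations) rather than as the raw EFLAGS bit 10, hence the
       1 - 2*bit mapping above and the inverse conversion on exit. */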
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble 0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (T0)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
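                /* On ia64, calling through a function pointer goes via a
                   function descriptor (entry address plus global pointer)
                   rather than a raw code address, so a descriptor for the
                   generated code is built by hand below. */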
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;
                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave(ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor(ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the signal
   set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
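
/* Contract implemented above and mirrored by every per-target variant
   below: return 0 when the fault is not ours (the host signal should be
   processed normally), return 1 when it was handled transparently; when a
   guest exception must be raised, the call never returns and instead
   longjmps back into cpu_exec(). */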
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else

#error unsupported target CPU

#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
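
/* Illustrative sketch only (hypothetical, not part of this file): how a
   user-mode emulator would typically install cpu_signal_handler so that
   guest memory faults arrive with full siginfo/ucontext information.  The
   real installation lives elsewhere in the emulator; kept under "#if 0". */
#if 0
#include <signal.h>
#include <string.h>

static void example_install_segv_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    sigfillset(&act.sa_mask);   /* block other signals while handling */
    act.sa_flags = SA_SIGINFO;  /* pass siginfo_t and ucontext to the handler */
    act.sa_sigaction = (void (*)(int, siginfo_t *, void *))cpu_signal_handler;
    sigaction(SIGSEGV, &act, NULL);
    sigaction(SIGBUS, &act, NULL);
}
#endif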
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */

#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = IAR_sig(uc);
    int is_write = 0;

#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: case 0x0e: case 0x0f:            /* stw, stb, stq_u */
    case 0x24: case 0x25: case 0x26: case 0x27: /* stf, stg, sts, stt */
    case 0x2c: case 0x2d: case 0x2e: case 0x2f: /* stl, stq, stl_c, stq_c */
        is_write = 1;
    }
    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: case 0x06: case 0x04: case 0x07: /* stb, sth, st, std */
        case 0x24: case 0x27: case 0x25:            /* stf, stdf, stfsr */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL: case SIGFPE: case SIGSEGV: case SIGBUS: case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */