2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #if !defined(CONFIG_SOFTMMU)
35 #include <sys/ucontext.h>
38 int tb_invalidated_flag;
41 //#define DEBUG_SIGNAL
/* Abort execution of the current translated block and unwind back to the
   setjmp() point inside cpu_exec().  Never returns.
   NOTE(review): this listing elides lines; braces and possibly other
   statements of the body are not visible here. */
43 void cpu_loop_exit(void)
45 /* NOTE: the register at this point must be saved by hand because
46 longjmp restore them */
48 longjmp(env->jmp_env, 1);
51 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
55 /* exit the current TB from a signal handler. The host registers are
56 restored in a state compatible with the CPU emulator
/* Called from host signal handlers (see handle_cpu_signal below): restores
   the signal mask that sigreturn would normally restore, then longjmps back
   into cpu_exec().  Never returns to the caller.
   'puc' is the host ucontext pointer; only used without soft-MMU. */
58 void cpu_resume_from_signal(CPUState *env1, void *puc)
60 #if !defined(CONFIG_SOFTMMU)
61 struct ucontext *uc = puc;
66 /* XXX: restore cpu registers saved in host registers */
68 #if !defined(CONFIG_SOFTMMU)
70 /* XXX: use siglongjmp ? */
/* re-enable the signals that were blocked on entry to the handler */
71 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
74 longjmp(env->jmp_env, 1);
/* Slow-path TB lookup: searches the physical-address hash table for a
   TranslationBlock matching (pc, cs_base, flags); if none exists, generates
   a new one with cpu_gen_code() and links it into the hash tables.
   Sets tb_invalidated_flag when code generation may have invalidated
   other TBs (caller must then flush its jump cache entry).
   NOTE(review): lines are elided in this listing; the loop header, tb
   allocation and several returns are not visible. */
78 static TranslationBlock *tb_find_slow(target_ulong pc,
82 TranslationBlock *tb, **ptb1;
85 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
90 tb_invalidated_flag = 0;
92 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
94 /* find translated block using physical mappings */
95 phys_pc = get_phys_addr_code(env, pc);
96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
98 h = tb_phys_hash_func(phys_pc);
99 ptb1 = &tb_phys_hash[h];
/* walk the collision chain; a match requires same first physical page,
   cs_base and flags */
105 tb->page_addr[0] == phys_page1 &&
106 tb->cs_base == cs_base &&
107 tb->flags == flags) {
108 /* check next page if needed */
109 if (tb->page_addr[1] != -1) {
110 virt_page2 = (pc & TARGET_PAGE_MASK) +
112 phys_page2 = get_phys_addr_code(env, virt_page2);
113 if (tb->page_addr[1] == phys_page2)
119 ptb1 = &tb->phys_hash_next;
122 /* if no translated code available, then translate it now */
125 /* flush must be done */
127 /* cannot fail at this point */
129 /* don't forget to invalidate previous TB info */
130 tb_invalidated_flag = 1;
132 tc_ptr = code_gen_ptr;
134 tb->cs_base = cs_base;
136 cpu_gen_code(env, tb, &code_gen_size);
/* bump the code buffer pointer, rounded up to CODE_GEN_ALIGN */
137 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
139 /* check next page if needed */
140 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
142 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
143 phys_page2 = get_phys_addr_code(env, virt_page2);
145 tb_link_phys(tb, phys_pc, phys_page2);
148 /* we add the TB in the virtual pc hash table */
149 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
150 spin_unlock(&tb_lock);
/* Fast-path TB lookup: computes the per-target (pc, cs_base, flags) key
   that identifies a translated block, probes the direct-mapped virtual-pc
   jump cache, and falls back to tb_find_slow() on a miss.  The per-target
   #if ladder below only encodes state that affects code generation. */
154 static inline TranslationBlock *tb_find_fast(void)
156 TranslationBlock *tb;
157 target_ulong cs_base, pc;
160 /* we record a subset of the CPU state. It will
161 always be the same before a given translated block
163 #if defined(TARGET_I386)
165 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
166 flags |= env->intercept;
167 cs_base = env->segs[R_CS].base;
168 pc = cs_base + env->eip;
169 #elif defined(TARGET_ARM)
170 flags = env->thumb | (env->vfp.vec_len << 1)
171 | (env->vfp.vec_stride << 4);
172 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
174 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
176 flags |= (env->condexec_bits << 8);
179 #elif defined(TARGET_SPARC)
180 #ifdef TARGET_SPARC64
181 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
182 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
183 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
185 // FPU enable . Supervisor
186 flags = (env->psref << 4) | env->psrs;
190 #elif defined(TARGET_PPC)
194 #elif defined(TARGET_MIPS)
195 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
/* MIPS PC is per-thread-context here (PC[current_tc]) */
197 pc = env->PC[env->current_tc];
198 #elif defined(TARGET_M68K)
199 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
200 | (env->sr & SR_S) /* Bit 13 */
201 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
204 #elif defined(TARGET_SH4)
208 #elif defined(TARGET_ALPHA)
212 #elif defined(TARGET_CRIS)
217 #error unsupported CPU
/* probe the direct-mapped jump cache; unlikely() on the miss path */
219 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
220 if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
221 tb->flags != flags, 0)) {
222 tb = tb_find_slow(pc, cs_base, flags);
223 /* Note: we do it here to avoid a gcc bug on Mac OS X when
224 doing it in tb_find_slow */
225 if (tb_invalidated_flag) {
226 /* as some TB could have been invalidated because
227 of memory exceptions while generating the code, we
228 must recompute the hash index here */
235 #if defined(__sparc__) && !defined(HOST_SOLARIS)
236 #define BREAK_CHAIN tmp_T0 = 0
238 #define BREAK_CHAIN T0 = 0
241 /* main execution loop */
/* Runs translated code for env1 until an exception/interrupt forces a
   return to the caller.  Overall shape (per the visible code):
     1. save host registers, convert target flags to the "CPU temporary"
        format used by generated code;
     2. setjmp() — all exits from generated code longjmp back here;
     3. deliver any pending exception (real or, for user-mode-only,
        simulated and returned to the caller);
     4. inner loop: service interrupt_request bits per target, find the
        next TB (tb_find_fast), optionally chain it to the previous TB,
        and call the generated code;
     5. on exit, restore canonical flag format and host registers.
   Returns the exception index that terminated execution.
   NOTE(review): many lines are elided in this listing; loop/brace
   structure and several branches are not visible. */
243 int cpu_exec(CPUState *env1)
245 #define DECLARE_HOST_REGS 1
246 #include "hostregs_helper.h"
247 #if defined(TARGET_SPARC)
248 #if defined(reg_REGWPTR)
249 uint32_t *saved_regwptr;
252 #if defined(__sparc__) && !defined(HOST_SOLARIS)
256 int ret, interrupt_request;
257 void (*gen_func)(void);
258 TranslationBlock *tb;
/* nothing to do if the CPU is halted and no interrupt can wake it */
261 if (cpu_halted(env1) == EXCP_HALTED)
264 cpu_single_env = env1;
266 /* first we save global registers */
267 #define SAVE_HOST_REGS 1
268 #include "hostregs_helper.h"
270 #if defined(__sparc__) && !defined(HOST_SOLARIS)
271 /* we also save i7 because longjmp may not restore it */
272 asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
276 #if defined(TARGET_I386)
277 /* put eflags in CPU temporary format */
278 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
279 DF = 1 - (2 * ((env->eflags >> 10) & 1));
280 CC_OP = CC_OP_EFLAGS;
281 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
282 #elif defined(TARGET_SPARC)
283 #if defined(reg_REGWPTR)
284 saved_regwptr = REGWPTR;
286 #elif defined(TARGET_M68K)
287 env->cc_op = CC_OP_FLAGS;
288 env->cc_dest = env->sr & 0xf;
289 env->cc_x = (env->sr >> 4) & 1;
290 #elif defined(TARGET_ALPHA)
291 #elif defined(TARGET_ARM)
292 #elif defined(TARGET_PPC)
293 #elif defined(TARGET_MIPS)
294 #elif defined(TARGET_SH4)
295 #elif defined(TARGET_CRIS)
298 #error unsupported target CPU
300 env->exception_index = -1;
302 /* prepare setjmp context for exception handling */
/* every cpu_loop_exit()/fault path longjmps back to this point */
304 if (setjmp(env->jmp_env) == 0) {
305 env->current_tb = NULL;
306 /* if an exception is pending, we execute it here */
307 if (env->exception_index >= 0) {
308 if (env->exception_index >= EXCP_INTERRUPT) {
309 /* exit request from the cpu execution loop */
310 ret = env->exception_index;
312 } else if (env->user_mode_only) {
313 /* if user mode only, we simulate a fake exception
314 which will be handled outside the cpu execution
316 #if defined(TARGET_I386)
317 do_interrupt_user(env->exception_index,
318 env->exception_is_int,
320 env->exception_next_eip);
322 ret = env->exception_index;
325 #if defined(TARGET_I386)
326 /* simulate a real cpu exception. On i386, it can
327 trigger new exceptions, but we do not handle
328 double or triple faults yet. */
329 do_interrupt(env->exception_index,
330 env->exception_is_int,
332 env->exception_next_eip, 0);
333 /* successfully delivered */
334 env->old_exception = -1;
335 #elif defined(TARGET_PPC)
337 #elif defined(TARGET_MIPS)
339 #elif defined(TARGET_SPARC)
340 do_interrupt(env->exception_index);
341 #elif defined(TARGET_ARM)
343 #elif defined(TARGET_SH4)
345 #elif defined(TARGET_ALPHA)
347 #elif defined(TARGET_CRIS)
349 #elif defined(TARGET_M68K)
353 env->exception_index = -1;
/* kqemu acceleration path: hand execution to the kernel module while it
   stays profitable; flag formats are converted across the call */
356 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
358 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
359 ret = kqemu_cpu_exec(env);
360 /* put eflags in CPU temporary format */
361 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
362 DF = 1 - (2 * ((env->eflags >> 10) & 1));
363 CC_OP = CC_OP_EFLAGS;
364 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
367 longjmp(env->jmp_env, 1);
368 } else if (ret == 2) {
369 /* softmmu execution needed */
371 if (env->interrupt_request != 0) {
372 /* hardware interrupt will be executed just after */
374 /* otherwise, we restart */
375 longjmp(env->jmp_env, 1);
/* T0 doubles as the "previous TB" handle used for block chaining */
381 T0 = 0; /* force lookup of first TB */
383 #if defined(__sparc__) && !defined(HOST_SOLARIS)
384 /* g1 can be modified by some libc? functions */
/* ---- per-target interrupt servicing; snapshot interrupt_request once ---- */
387 interrupt_request = env->interrupt_request;
388 if (__builtin_expect(interrupt_request, 0)
389 #if defined(TARGET_I386)
390 && env->hflags & HF_GIF_MASK
393 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
394 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
395 env->exception_index = EXCP_DEBUG;
398 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
399 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
400 if (interrupt_request & CPU_INTERRUPT_HALT) {
401 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
403 env->exception_index = EXCP_HLT;
407 #if defined(TARGET_I386)
408 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
409 !(env->hflags & HF_SMM_MASK)) {
410 svm_check_intercept(SVM_EXIT_SMI);
411 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
414 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
415 (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
416 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
418 svm_check_intercept(SVM_EXIT_INTR);
419 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
420 intno = cpu_get_pic_interrupt(env);
421 if (loglevel & CPU_LOG_TB_IN_ASM) {
422 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
424 do_interrupt(intno, 0, 0, 0, 1);
425 /* ensure that no TB jump will be modified as
426 the program flow was changed */
428 #if !defined(CONFIG_USER_ONLY)
429 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
430 (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
432 /* FIXME: this should respect TPR */
433 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
434 svm_check_intercept(SVM_EXIT_VINTR);
/* SVM virtual interrupt: vector comes from the VMCB, and V_IRQ is
   cleared there after delivery */
435 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
436 if (loglevel & CPU_LOG_TB_IN_ASM)
437 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
438 do_interrupt(intno, 0, 0, -1, 1);
439 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
440 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
444 #elif defined(TARGET_PPC)
446 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
450 if (interrupt_request & CPU_INTERRUPT_HARD) {
451 ppc_hw_interrupt(env);
452 if (env->pending_interrupts == 0)
453 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
456 #elif defined(TARGET_MIPS)
457 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
458 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
459 (env->CP0_Status & (1 << CP0St_IE)) &&
460 !(env->CP0_Status & (1 << CP0St_EXL)) &&
461 !(env->CP0_Status & (1 << CP0St_ERL)) &&
462 !(env->hflags & MIPS_HFLAG_DM)) {
464 env->exception_index = EXCP_EXT_INTERRUPT;
469 #elif defined(TARGET_SPARC)
470 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
472 int pil = env->interrupt_index & 15;
473 int type = env->interrupt_index & 0xf0;
475 if (((type == TT_EXTINT) &&
476 (pil == 15 || pil > env->psrpil)) ||
478 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
479 do_interrupt(env->interrupt_index);
480 env->interrupt_index = 0;
481 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
486 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
487 //do_interrupt(0, 0, 0, 0, 0);
488 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
490 #elif defined(TARGET_ARM)
491 if (interrupt_request & CPU_INTERRUPT_FIQ
492 && !(env->uncached_cpsr & CPSR_F)) {
493 env->exception_index = EXCP_FIQ;
497 /* ARMv7-M interrupt return works by loading a magic value
498 into the PC. On real hardware the load causes the
499 return to occur. The qemu implementation performs the
500 jump normally, then does the exception return when the
501 CPU tries to execute code at the magic address.
502 This will cause the magic PC value to be pushed to
503 the stack if an interrupt occured at the wrong time.
504 We avoid this by disabling interrupts when
505 pc contains a magic address. */
506 if (interrupt_request & CPU_INTERRUPT_HARD
507 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
508 || !(env->uncached_cpsr & CPSR_I))) {
509 env->exception_index = EXCP_IRQ;
513 #elif defined(TARGET_SH4)
514 if (interrupt_request & CPU_INTERRUPT_HARD) {
518 #elif defined(TARGET_ALPHA)
519 if (interrupt_request & CPU_INTERRUPT_HARD) {
523 #elif defined(TARGET_CRIS)
524 if (interrupt_request & CPU_INTERRUPT_HARD) {
526 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
529 #elif defined(TARGET_M68K)
530 if (interrupt_request & CPU_INTERRUPT_HARD
531 && ((env->sr & SR_I) >> SR_I_SHIFT)
532 < env->pending_level) {
533 /* Real hardware gets the interrupt vector via an
534 IACK cycle at this point. Current emulated
535 hardware doesn't rely on this, so we
536 provide/save the vector when the interrupt is
538 env->exception_index = env->pending_vector;
543 /* Don't use the cached interupt_request value,
544 do_interrupt may have updated the EXITTB flag. */
545 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
546 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
547 /* ensure that no TB jump will be modified as
548 the program flow was changed */
551 if (interrupt_request & CPU_INTERRUPT_EXIT) {
552 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
553 env->exception_index = EXCP_INTERRUPT;
/* ---- optional CPU-state tracing before running the next TB ---- */
558 if ((loglevel & CPU_LOG_TB_CPU)) {
559 /* restore flags in standard format */
561 #if defined(TARGET_I386)
562 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
563 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
564 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
565 #elif defined(TARGET_ARM)
566 cpu_dump_state(env, logfile, fprintf, 0);
567 #elif defined(TARGET_SPARC)
568 REGWPTR = env->regbase + (env->cwp * 16);
569 env->regwptr = REGWPTR;
570 cpu_dump_state(env, logfile, fprintf, 0);
571 #elif defined(TARGET_PPC)
572 cpu_dump_state(env, logfile, fprintf, 0);
573 #elif defined(TARGET_M68K)
574 cpu_m68k_flush_flags(env, env->cc_op);
575 env->cc_op = CC_OP_FLAGS;
576 env->sr = (env->sr & 0xffe0)
577 | env->cc_dest | (env->cc_x << 4);
578 cpu_dump_state(env, logfile, fprintf, 0);
579 #elif defined(TARGET_MIPS)
580 cpu_dump_state(env, logfile, fprintf, 0);
581 #elif defined(TARGET_SH4)
582 cpu_dump_state(env, logfile, fprintf, 0);
583 #elif defined(TARGET_ALPHA)
584 cpu_dump_state(env, logfile, fprintf, 0);
585 #elif defined(TARGET_CRIS)
586 cpu_dump_state(env, logfile, fprintf, 0);
588 #error unsupported target CPU
594 if ((loglevel & CPU_LOG_EXEC)) {
595 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
596 (long)tb->tc_ptr, tb->pc,
597 lookup_symbol(tb->pc));
600 #if defined(__sparc__) && !defined(HOST_SOLARIS)
603 /* see if we can patch the calling TB. When the TB
604 spans two pages, we cannot safely do a direct
/* chain previous TB to this one (low 2 bits of T0 encode the jump slot) */
609 (env->kqemu_enabled != 2) &&
611 tb->page_addr[1] == -1) {
613 tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
614 spin_unlock(&tb_lock);
618 env->current_tb = tb;
619 /* execute the generated code */
620 gen_func = (void *)tc_ptr;
621 #if defined(__sparc__)
/* host-specific call into generated code with explicit clobber lists */
622 __asm__ __volatile__("call %0\n\t"
626 : "i0", "i1", "i2", "i3", "i4", "i5",
627 "o0", "o1", "o2", "o3", "o4", "o5",
628 "l0", "l1", "l2", "l3", "l4", "l5",
630 #elif defined(__arm__)
631 asm volatile ("mov pc, %0\n\t"
632 ".global exec_loop\n\t"
636 : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
637 #elif defined(__ia64)
/* ia64 calls through a constructed function descriptor (entry + gp) */
644 fp.gp = code_gen_buffer + 2 * (1 << 20);
645 (*(void (*)(void)) &fp)();
649 env->current_tb = NULL;
650 /* reset soft MMU for next block (it can currently
651 only be set by a memory fault) */
652 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
653 if (env->hflags & HF_SOFTMMU_MASK) {
654 env->hflags &= ~HF_SOFTMMU_MASK;
655 /* do not allow linking to another block */
659 #if defined(USE_KQEMU)
660 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
/* switch back to kqemu once enough cycles passed since the last I/O */
661 if (kqemu_is_ok(env) &&
662 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
/* ---- epilogue: convert flags back to canonical format ---- */
673 #if defined(TARGET_I386)
674 /* restore flags in standard format */
675 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
676 #elif defined(TARGET_ARM)
677 /* XXX: Save/restore host fpu exception state?. */
678 #elif defined(TARGET_SPARC)
679 #if defined(reg_REGWPTR)
680 REGWPTR = saved_regwptr;
682 #elif defined(TARGET_PPC)
683 #elif defined(TARGET_M68K)
684 cpu_m68k_flush_flags(env, env->cc_op);
685 env->cc_op = CC_OP_FLAGS;
686 env->sr = (env->sr & 0xffe0)
687 | env->cc_dest | (env->cc_x << 4);
688 #elif defined(TARGET_MIPS)
689 #elif defined(TARGET_SH4)
690 #elif defined(TARGET_ALPHA)
691 #elif defined(TARGET_CRIS)
694 #error unsupported target CPU
697 /* restore global registers */
698 #if defined(__sparc__) && !defined(HOST_SOLARIS)
699 asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
701 #include "hostregs_helper.h"
703 /* fail safe : never use cpu_single_env outside cpu_exec() */
704 cpu_single_env = NULL;
708 /* must only be called from the generated code as an exception can be
/* Invalidate all TBs overlapping [start, end) for self-modifying code.
   Translates 'start' to a physical address and invalidates the
   corresponding physical range. */
710 void tb_invalidate_page_range(target_ulong start, target_ulong end)
712 /* XXX: cannot enable it yet because it yields to MMU exception
713 where NIP != read address on PowerPC */
715 target_ulong phys_addr;
716 phys_addr = get_phys_addr_code(env, start);
717 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
721 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
/* User-mode helper: load segment register 'seg_reg' with 'selector'.
   In real/VM86 mode the cache is set directly (base = selector << 4);
   in protected mode the full load_seg() descriptor logic runs.
   NOTE(review): saved_env save/restore lines are elided in this listing. */
723 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
725 CPUX86State *saved_env;
729 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
731 cpu_x86_load_seg_cache(env, seg_reg, selector,
732 (selector << 4), 0xffff, 0);
734 load_seg(seg_reg, selector);
/* User-mode helper: FSAVE — store FPU state at guest address 'ptr';
   'data32' selects the 32-bit vs 16-bit layout.  Delegates to
   helper_fsave() (env switch lines elided in this listing). */
739 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
741 CPUX86State *saved_env;
746 helper_fsave(ptr, data32);
/* User-mode helper: FRSTOR — reload FPU state from guest address 'ptr';
   mirror of cpu_x86_fsave() above, delegating to helper_frstor(). */
751 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
753 CPUX86State *saved_env;
758 helper_frstor(ptr, data32);
763 #endif /* TARGET_I386 */
765 #if !defined(CONFIG_SOFTMMU)
767 #if defined(TARGET_I386)
769 /* 'pc' is the host PC at which the exception was raised. 'address' is
770 the effective address of the memory exception. 'is_write' is 1 if a
771 write caused the exception and otherwise 0'. 'old_set' is the
772 signal set which should be restored */
/* i386-target SIGSEGV core: returns 1 if the fault was fully handled
   (write to a TB-protected page, or an MMU fault resolved silently),
   0 if the host signal should be delivered normally; re-enters the
   guest (no return) when a real guest fault must be raised. */
773 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
774 int is_write, sigset_t *old_set,
777 TranslationBlock *tb;
781 env = cpu_single_env; /* XXX: find a correct solution for multithread */
782 #if defined(DEBUG_SIGNAL)
783 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
784 pc, address, is_write, *(unsigned long *)old_set);
786 /* XXX: locking issue */
/* first case: write into a page protected for TB invalidation */
787 if (is_write && page_unprotect(h2g(address), pc, puc)) {
791 /* see if it is an MMU fault */
792 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
794 return 0; /* not an MMU fault */
796 return 1; /* the MMU fault was handled without causing real CPU fault */
797 /* now we have a real cpu fault */
800 /* the PC is inside the translated code. It means that we have
801 a virtual CPU fault */
802 cpu_restore_state(tb, env, pc, puc);
806 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
807 env->eip, env->cr[2], env->error_code);
809 /* we restore the process signal mask as the sigreturn should
810 do it (XXX: use sigsetjmp) */
811 sigprocmask(SIG_SETMASK, old_set, NULL);
/* re-raises inside the guest; does not return */
812 raise_exception_err(env->exception_index, env->error_code);
814 /* activate soft MMU for this block */
815 env->hflags |= HF_SOFTMMU_MASK;
816 cpu_resume_from_signal(env, puc);
818 /* never comes here */
822 #elif defined(TARGET_ARM)
/* ARM-target SIGSEGV core: same contract as the i386 variant above —
   1 = handled, 0 = not an MMU fault, otherwise re-enters the guest. */
823 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
824 int is_write, sigset_t *old_set,
827 TranslationBlock *tb;
831 env = cpu_single_env; /* XXX: find a correct solution for multithread */
832 #if defined(DEBUG_SIGNAL)
833 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
834 pc, address, is_write, *(unsigned long *)old_set);
836 /* XXX: locking issue */
837 if (is_write && page_unprotect(h2g(address), pc, puc)) {
840 /* see if it is an MMU fault */
841 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
843 return 0; /* not an MMU fault */
845 return 1; /* the MMU fault was handled without causing real CPU fault */
846 /* now we have a real cpu fault */
849 /* the PC is inside the translated code. It means that we have
850 a virtual CPU fault */
851 cpu_restore_state(tb, env, pc, puc);
853 /* we restore the process signal mask as the sigreturn should
854 do it (XXX: use sigsetjmp) */
855 sigprocmask(SIG_SETMASK, old_set, NULL);
858 #elif defined(TARGET_SPARC)
/* SPARC-target SIGSEGV core: same contract as the i386 variant above. */
859 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
860 int is_write, sigset_t *old_set,
863 TranslationBlock *tb;
867 env = cpu_single_env; /* XXX: find a correct solution for multithread */
868 #if defined(DEBUG_SIGNAL)
869 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
870 pc, address, is_write, *(unsigned long *)old_set);
872 /* XXX: locking issue */
873 if (is_write && page_unprotect(h2g(address), pc, puc)) {
876 /* see if it is an MMU fault */
877 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
879 return 0; /* not an MMU fault */
881 return 1; /* the MMU fault was handled without causing real CPU fault */
882 /* now we have a real cpu fault */
885 /* the PC is inside the translated code. It means that we have
886 a virtual CPU fault */
887 cpu_restore_state(tb, env, pc, puc);
889 /* we restore the process signal mask as the sigreturn should
890 do it (XXX: use sigsetjmp) */
891 sigprocmask(SIG_SETMASK, old_set, NULL);
894 #elif defined (TARGET_PPC)
/* PowerPC-target SIGSEGV core: same contract as the i386 variant above;
   re-raises guest faults via do_raise_exception_err() (no return). */
895 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
896 int is_write, sigset_t *old_set,
899 TranslationBlock *tb;
903 env = cpu_single_env; /* XXX: find a correct solution for multithread */
904 #if defined(DEBUG_SIGNAL)
905 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
906 pc, address, is_write, *(unsigned long *)old_set);
908 /* XXX: locking issue */
909 if (is_write && page_unprotect(h2g(address), pc, puc)) {
913 /* see if it is an MMU fault */
914 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
916 return 0; /* not an MMU fault */
918 return 1; /* the MMU fault was handled without causing real CPU fault */
920 /* now we have a real cpu fault */
923 /* the PC is inside the translated code. It means that we have
924 a virtual CPU fault */
925 cpu_restore_state(tb, env, pc, puc);
929 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
930 env->nip, env->error_code, tb);
932 /* we restore the process signal mask as the sigreturn should
933 do it (XXX: use sigsetjmp) */
934 sigprocmask(SIG_SETMASK, old_set, NULL);
935 do_raise_exception_err(env->exception_index, env->error_code);
937 /* activate soft MMU for this block */
938 cpu_resume_from_signal(env, puc);
940 /* never comes here */
944 #elif defined(TARGET_M68K)
/* M68K-target SIGSEGV core: same contract as the i386 variant above.
   NOTE(review): this variant passes 'address' to page_unprotect()
   directly, while every sibling handler wraps it with h2g() — verify
   whether the host-to-guest translation is intentionally omitted here. */
945 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
946 int is_write, sigset_t *old_set,
949 TranslationBlock *tb;
953 env = cpu_single_env; /* XXX: find a correct solution for multithread */
954 #if defined(DEBUG_SIGNAL)
955 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
956 pc, address, is_write, *(unsigned long *)old_set);
958 /* XXX: locking issue */
959 if (is_write && page_unprotect(address, pc, puc)) {
962 /* see if it is an MMU fault */
963 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
965 return 0; /* not an MMU fault */
967 return 1; /* the MMU fault was handled without causing real CPU fault */
968 /* now we have a real cpu fault */
971 /* the PC is inside the translated code. It means that we have
972 a virtual CPU fault */
973 cpu_restore_state(tb, env, pc, puc);
975 /* we restore the process signal mask as the sigreturn should
976 do it (XXX: use sigsetjmp) */
977 sigprocmask(SIG_SETMASK, old_set, NULL);
979 /* never comes here */
983 #elif defined (TARGET_MIPS)
/* MIPS-target SIGSEGV core: same contract as the i386 variant above.
   NOTE(review): the debug printf uses 'env->PC', but tb_find_fast()
   indexes the same field as 'env->PC[env->current_tc]' — one of the two
   spellings is stale; confirm against the CPUMIPSState definition. */
984 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
985 int is_write, sigset_t *old_set,
988 TranslationBlock *tb;
992 env = cpu_single_env; /* XXX: find a correct solution for multithread */
993 #if defined(DEBUG_SIGNAL)
994 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
995 pc, address, is_write, *(unsigned long *)old_set);
997 /* XXX: locking issue */
998 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1002 /* see if it is an MMU fault */
1003 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1005 return 0; /* not an MMU fault */
1007 return 1; /* the MMU fault was handled without causing real CPU fault */
1009 /* now we have a real cpu fault */
1010 tb = tb_find_pc(pc);
1012 /* the PC is inside the translated code. It means that we have
1013 a virtual CPU fault */
1014 cpu_restore_state(tb, env, pc, puc);
1018 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1019 env->PC, env->error_code, tb);
1021 /* we restore the process signal mask as the sigreturn should
1022 do it (XXX: use sigsetjmp) */
1023 sigprocmask(SIG_SETMASK, old_set, NULL);
1024 do_raise_exception_err(env->exception_index, env->error_code);
1026 /* activate soft MMU for this block */
1027 cpu_resume_from_signal(env, puc);
1029 /* never comes here */
1033 #elif defined (TARGET_SH4)
/* SH4-target SIGSEGV core: same contract as the i386 variant above.
   NOTE(review): the debug printf references 'env->nip' — a PowerPC
   field name; this looks copy-pasted from the PPC handler and would not
   compile with DEBUG_SIGNAL enabled on SH4. Should likely be env->pc. */
1034 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1035 int is_write, sigset_t *old_set,
1038 TranslationBlock *tb;
1042 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1043 #if defined(DEBUG_SIGNAL)
1044 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1045 pc, address, is_write, *(unsigned long *)old_set);
1047 /* XXX: locking issue */
1048 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1052 /* see if it is an MMU fault */
1053 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1055 return 0; /* not an MMU fault */
1057 return 1; /* the MMU fault was handled without causing real CPU fault */
1059 /* now we have a real cpu fault */
1060 tb = tb_find_pc(pc);
1062 /* the PC is inside the translated code. It means that we have
1063 a virtual CPU fault */
1064 cpu_restore_state(tb, env, pc, puc);
1067 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1068 env->nip, env->error_code, tb);
1070 /* we restore the process signal mask as the sigreturn should
1071 do it (XXX: use sigsetjmp) */
1072 sigprocmask(SIG_SETMASK, old_set, NULL);
1074 /* never comes here */
1078 #elif defined (TARGET_ALPHA)
/* Alpha-target SIGSEGV core: same contract as the i386 variant above.
   NOTE(review): the debug printf references 'env->nip' — a PowerPC
   field; copy-paste from the PPC handler (Alpha's PC field differs).
   Would not compile with DEBUG_SIGNAL enabled. */
1079 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1080 int is_write, sigset_t *old_set,
1083 TranslationBlock *tb;
1087 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1088 #if defined(DEBUG_SIGNAL)
1089 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1090 pc, address, is_write, *(unsigned long *)old_set);
1092 /* XXX: locking issue */
1093 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1097 /* see if it is an MMU fault */
1098 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1100 return 0; /* not an MMU fault */
1102 return 1; /* the MMU fault was handled without causing real CPU fault */
1104 /* now we have a real cpu fault */
1105 tb = tb_find_pc(pc);
1107 /* the PC is inside the translated code. It means that we have
1108 a virtual CPU fault */
1109 cpu_restore_state(tb, env, pc, puc);
1112 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1113 env->nip, env->error_code, tb);
1115 /* we restore the process signal mask as the sigreturn should
1116 do it (XXX: use sigsetjmp) */
1117 sigprocmask(SIG_SETMASK, old_set, NULL);
1119 /* never comes here */
1122 #elif defined (TARGET_CRIS)
/* CRIS-target SIGSEGV core: same contract as the i386 variant above.
   NOTE(review): the debug printf references 'env->nip' — a PowerPC
   field; copy-paste from the PPC handler. Would not compile with
   DEBUG_SIGNAL enabled on CRIS. */
1123 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1124 int is_write, sigset_t *old_set,
1127 TranslationBlock *tb;
1131 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1132 #if defined(DEBUG_SIGNAL)
1133 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1134 pc, address, is_write, *(unsigned long *)old_set);
1136 /* XXX: locking issue */
1137 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1141 /* see if it is an MMU fault */
1142 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1144 return 0; /* not an MMU fault */
1146 return 1; /* the MMU fault was handled without causing real CPU fault */
1148 /* now we have a real cpu fault */
1149 tb = tb_find_pc(pc);
1151 /* the PC is inside the translated code. It means that we have
1152 a virtual CPU fault */
1153 cpu_restore_state(tb, env, pc, puc);
1156 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1157 env->nip, env->error_code, tb);
1159 /* we restore the process signal mask as the sigreturn should
1160 do it (XXX: use sigsetjmp) */
1161 sigprocmask(SIG_SETMASK, old_set, NULL);
1163 /* never comes here */
1168 #error unsupported target CPU
1171 #if defined(__i386__)
1173 #if defined(__APPLE__)
1174 # include <sys/ucontext.h>
1176 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1177 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1178 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1180 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1181 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1182 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
/* i386-host SIGSEGV entry point: extracts fault pc/trapno/error-code
   from the host ucontext via the EIP_sig/TRAP_sig/ERROR_sig macros above
   and forwards to handle_cpu_signal().  For page faults (trap 0xe) the
   write bit is taken from bit 1 of the error code. */
1185 int cpu_signal_handler(int host_signum, void *pinfo,
1188 siginfo_t *info = pinfo;
1189 struct ucontext *uc = puc;
1197 #define REG_TRAPNO TRAPNO
1200 trapno = TRAP_sig(uc);
1201 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1203 (ERROR_sig(uc) >> 1) & 1 : 0,
1204 &uc->uc_sigmask, puc);
1207 #elif defined(__x86_64__)
/* x86_64-host SIGSEGV entry point: reads RIP/TRAPNO/ERR straight from
   the mcontext gregs; page fault (trap 0xe) write bit is bit 1 of the
   error code, as in the i386 host variant. */
1209 int cpu_signal_handler(int host_signum, void *pinfo,
1212 siginfo_t *info = pinfo;
1213 struct ucontext *uc = puc;
1216 pc = uc->uc_mcontext.gregs[REG_RIP];
1217 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1218 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1219 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1220 &uc->uc_sigmask, puc);
1223 #elif defined(__powerpc__)
1225 /***********************************************************************
1226 * signal context platform-specific definitions
1230 /* All Registers access - only for local access */
1231 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1232 /* Gpr Registers access */
1233 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1234 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1235 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1236 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1237 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1238 # define LR_sig(context) REG_sig(link, context) /* Link register */
1239 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1240 /* Float Registers access */
/* NOTE(review): the FP registers start 48 words (gpr0..gpr31 + the 16
   special regs) past the start of pt_regs; FPSCR follows the 32
   double-width FP registers — offsets assume the Linux pt_regs layout,
   confirm against <asm/ptrace.h>. */
1241 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1242 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1243 /* Exception Registers access */
1244 # define DAR_sig(context) REG_sig(dar, context)
1245 # define DSISR_sig(context) REG_sig(dsisr, context)
1246 # define TRAP_sig(context) REG_sig(trap, context)
/* Darwin/Mac OS X variant: uc_mcontext is a pointer and the machine
   state is split into thread (ss), float (fs), exception (es) and
   vector (vs) sub-structures. */
1250 # include <sys/ucontext.h>
1251 typedef struct ucontext SIGCONTEXT;
1252 /* All Registers access - only for local access */
1253 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1254 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1255 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1256 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1257 /* Gpr Registers access */
1258 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1259 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1260 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1261 # define CTR_sig(context) REG_sig(ctr, context)
1262 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1263 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1264 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1265 /* Float Registers access */
1266 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1267 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1268 /* Exception Registers access */
1269 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1270 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1271 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1272 #endif /* __APPLE__ */
/* PowerPC host: derive is_write from DSISR.  Bit 0x02000000 of DSISR is
   set for store accesses; trap 0x400 is an instruction-access (ISI)
   exception, which can never be a write.  NOTE(review): the 0x00800000
   test appears to be an alternative (4xx-style) check — the #if/#else
   scaffolding around these two tests, plus declarations and the closing
   brace, are elided from this fragment. */
1274 int cpu_signal_handler(int host_signum, void *pinfo,
1277 siginfo_t *info = pinfo;
1278 struct ucontext *uc = puc;
1286 if (DSISR_sig(uc) & 0x00800000)
1289 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1292 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1293 is_write, &uc->uc_sigmask, puc);
1296 #elif defined(__alpha__)
/* Alpha host: the sigcontext carries no write flag, so the faulting
   instruction itself is fetched and its primary opcode (top 6 bits)
   decoded to tell stores from loads; the switch cases are elided from
   this fragment.  NOTE(review): sc_pc is an integer field — the
   implicit conversion to uint32_t* (and passing the pointer as the
   unsigned-long pc argument) relies on implementation-defined
   behavior; a cast would be cleaner. */
1298 int cpu_signal_handler(int host_signum, void *pinfo,
1301 siginfo_t *info = pinfo;
1302 struct ucontext *uc = puc;
1303 uint32_t *pc = uc->uc_mcontext.sc_pc;
1304 uint32_t insn = *pc;
1307 /* XXX: need kernel patch to get write flag faster */
1308 switch (insn >> 26) {
1323 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1324 is_write, &uc->uc_sigmask, puc);
1326 #elif defined(__sparc__)
/* Sparc host: no portable ucontext accessors here — the register save
   area is assumed to sit in memory immediately after the siginfo_t, and
   the signal mask 20 words into it (layout assumption, see the XXX
   below).  is_write is found by decoding the faulting instruction:
   format-3 ops have the top two bits == 3, and the op3 field
   ((insn >> 19) & 0x3f) distinguishes loads from stores; the switch
   cases are elided from this fragment. */
1328 int cpu_signal_handler(int host_signum, void *pinfo,
1331 siginfo_t *info = pinfo;
1332 uint32_t *regs = (uint32_t *)(info + 1)
1333 void *sigmask = (regs + 20);
1338 /* XXX: is there a standard glibc define ? */
1340 /* XXX: need kernel patch to get write flag faster */
1342 insn = *(uint32_t *)pc;
1343 if ((insn >> 30) == 3) {
1344 switch((insn >> 19) & 0x3f) {
1356 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1357 is_write, sigmask, NULL);
1360 #elif defined(__arm__)
/* ARM host: PC is gregs[R15]; no write/read discrimination is done yet
   (see the XXX), so is_write presumably stays at its elided initial
   value. */
1362 int cpu_signal_handler(int host_signum, void *pinfo,
1365 siginfo_t *info = pinfo;
1366 struct ucontext *uc = puc;
1370 pc = uc->uc_mcontext.gregs[R15];
1371 /* XXX: compute is_write */
1373 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1375 &uc->uc_sigmask, puc);
1378 #elif defined(__mc68000)
/* m68k host: gregs[16] is presumably the PC slot in the m68k gregset —
   TODO confirm against <sys/ucontext.h>; is_write is not computed
   (see XXX). */
1380 int cpu_signal_handler(int host_signum, void *pinfo,
1383 siginfo_t *info = pinfo;
1384 struct ucontext *uc = puc;
1388 pc = uc->uc_mcontext.gregs[16];
1389 /* XXX: compute is_write */
1391 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1393 &uc->uc_sigmask, puc);
1396 #elif defined(__ia64)
1399 /* This ought to be in <bits/siginfo.h>... */
1400 # define __ISR_VALID 1
/* ia64 host: the faulting IP comes from sc_ip; for SEGV-class signals
   the kernel-supplied ISR word in siginfo carries the access type —
   ISR.W (bit 33) set means a write access, guarded by __ISR_VALID.
   (The switch cases over host_signum are elided from this fragment.) */
1403 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1405 siginfo_t *info = pinfo;
1406 struct ucontext *uc = puc;
1410 ip = uc->uc_mcontext.sc_ip;
1411 switch (host_signum) {
1417 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1418 /* ISR.W (write-access) is bit 33: */
1419 is_write = (info->si_isr >> 33) & 1;
1425 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1427 &uc->uc_sigmask, puc);
1430 #elif defined(__s390__)
/* s390 host: the program counter is the address half of the PSW saved
   in the ucontext; is_write is not computed (see XXX). */
1432 int cpu_signal_handler(int host_signum, void *pinfo,
1435 siginfo_t *info = pinfo;
1436 struct ucontext *uc = puc;
1440 pc = uc->uc_mcontext.psw.addr;
1441 /* XXX: compute is_write */
1443 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1444 is_write, &uc->uc_sigmask, puc);
1447 #elif defined(__mips__)
/* MIPS host: PC is taken directly from uc_mcontext.pc; is_write is not
   computed (see XXX). */
1449 int cpu_signal_handler(int host_signum, void *pinfo,
1452 siginfo_t *info = pinfo;
1453 struct ucontext *uc = puc;
1454 greg_t pc = uc->uc_mcontext.pc;
1457 /* XXX: compute is_write */
1459 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1460 is_write, &uc->uc_sigmask, puc);
1465 #error host CPU specific signal handler needed
1469 #endif /* !defined(CONFIG_SOFTMMU) */