/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
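/* The setjmp/longjmp pair is the backbone of the whole execution loop.
   A minimal sketch of the contract (illustrative only; the real code in
   cpu_exec() below carries more state):

       if (setjmp(env->jmp_env) == 0) {
           ... find and run translated blocks ...
       }
       // cpu_loop_exit() lands back here, with env->exception_index
       // describing the pending exception or exit request.
*/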
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
    longjmp(env->jmp_env, 1);
}
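/* Typical call chain into the function above (a sketch; the concrete
   handlers appear at the end of this file):

       host SIGSEGV
         -> cpu_signal_handler()     (host-specific, bottom of file)
           -> handle_cpu_signal()    (target-specific)
             -> cpu_resume_from_signal(env, puc)

   Restoring uc->uc_sigmask by hand stands in for the sigreturn that the
   interrupted signal handler never reaches; without it, SIGSEGV would
   remain blocked after the longjmp. */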
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
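    /* The line above is the usual round-up-to-alignment idiom: e.g. with
       CODE_GEN_ALIGN == 16, code_gen_ptr == 0x1003 and code_gen_size == 5,
       (0x1008 + 15) & ~15 == 0x1010, so the next TB starts on a 16-byte
       boundary. */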
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
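/* Both lookup structures are plain hash tables.  A sketch of what the
   two hash functions amount to (illustrative only; the real definitions
   live in exec-all.h and may mix in more address bits):

       h = tb_jmp_cache_hash_func(pc);   // index into the per-CPU,
                                         // direct-mapped virtual-pc cache
       h = tb_phys_hash_func(phys_pc);   // index into the global table,
                                         // chained via tb->phys_hash_next

   The virtual cache (env->tb_jmp_cache) is only a fast front end; the
   physical table is authoritative and survives MMU remappings. */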
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
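/* Note on the (pc, cs_base, flags) triple: it must capture everything
   that influenced the translation, not merely the entry point.  The same
   pc translates differently in ARM vs Thumb state, and the same eip
   translates differently under VM86 or another IOPL on i386; a flag bit
   missing here means silently reusing stale translated code. */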
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
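    /* This maps EFLAGS.DF (bit 10) onto the internal DF variable:
       DF bit 0 -> +1 (string ops walk upward), DF bit 1 -> -1 (downward). */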
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
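                    /* (Addresses at or above 0xfffffff0 are the v7-M
                       EXC_RETURN magic values, which is why the test
                       below treats pc >= 0xfffffff0 as "inside an
                       interrupt return".) */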
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
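                /* next_tb packs two things: the low two bits select which
                   of the previous TB's jump slots fell through, and the
                   remaining bits are that TB's host address.  tb_add_jump()
                   patches the slot to branch directly into tb->tc_ptr, so
                   chained blocks bypass this lookup path on later runs. */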
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
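                /* tcg_qemu_tb_exec() returns 0 when the block exited
                   through an explicit exit-TB path, or the packed
                   (tb | jump-slot) cookie described above when it left
                   through a not-yet-chained goto_tb; that cookie feeds
                   the patching logic on the next iteration. */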
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the signal set
   which should be restored. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
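/* Return protocol shared by every handle_cpu_signal() variant below:
   0 means the fault is not ours and is handed back to the host, 1 means
   the page was fixed up (or the guest fault fully handled) and the
   faulting instruction can simply be restarted.  A guest-visible fault
   never returns here at all: it longjmps back into cpu_exec() via
   raise_exception_err()/cpu_loop_exit(). */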
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC[env->current_tc], env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
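/* The x86 page-fault error code keeps its write/read flag in bit 1,
   hence "(ERROR_sig(uc) >> 1) & 1"; gating on trapno == 0xe (#PF) keeps
   other traps from being misread as write faults. */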
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 ***********************************************************************/

#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)             REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context)  /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)

#elif defined(__APPLE__)

# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(r##reg_num, context)
# define IAR_sig(context)             REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)             REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)              REG_sig(lr, context)    /* Link register */
# define CR_sig(context)              REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)           ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)             EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)           EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)            EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
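/* Write-detection heuristic used above: for data storage interrupts the
   DSISR "store" flag is the 0x02000000 bit, with TRAP_sig != 0x400
   ruling out instruction storage interrupts (whose DSISR bits mean
   something else); the 0x00800000 variant is the ppc 4xx layout. */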
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: case 0x0e: case 0x0f:            /* stw, stb, stq_u */
    case 0x24: case 0x25: case 0x26: case 0x27: /* stf, stg, sts, stt */
    case 0x2c: case 0x2d: case 0x2e: case 0x2f: /* stl, stq, stl_c, stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: case 0x06: case 0x04: case 0x07: /* stb, sth, st, std */
        case 0x24: case 0x27: case 0x25:            /* stf, stdf, stfsr */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL: case SIGFPE: case SIGSEGV: case SIGBUS: case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */