 * i386 emulator main execution loop
 * Copyright (c) 2003-2005 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#define CPU_NO_GLOBAL_REGS
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#define env cpu_single_env
int tb_invalidated_flag;
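/* Next TB to execute, as returned by the generated code: the low two
   bits encode which jump slot of the calling TB should be patched for
   direct block chaining; zero forces a full lookup (see tb_add_jump
   below). */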
static unsigned long next_tb;
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    longjmp(env->jmp_env, 1);
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
void cpu_resume_from_signal(CPUState *env1, void *puc)
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
    /* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
    /* XXX: use siglongjmp ? */
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    longjmp(env->jmp_env, 1);
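/* Look up the TB for (pc, cs_base, flags) in the physical hash table;
   if no translated code exists yet, generate it now. This is the slow
   path taken when the virtual-pc jump cache misses. */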
static TranslationBlock *tb_find_slow(target_ulong pc,
    TranslationBlock *tb, **ptb1;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    tb_invalidated_flag = 0;
    regs_to_env(); /* XXX: do it just before cpu_gen_code() */
    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
        ptb1 = &tb->phys_hash_next;
    /* if no translated code available, then translate it now */
        /* flush must be done */
        /* cannot fail at this point */
    /* don't forget to invalidate previous TB info */
    tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    tb_link_phys(tb, phys_pc, phys_page2);
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
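/* Fast TB lookup: hash the current virtual PC into the one-entry-per-
   bucket tb_jmp_cache and fall back to tb_find_slow() on a miss. */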
static inline TranslationBlock *tb_find_fast(void)
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
#if defined(TARGET_I386)
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
    flags |= (env->condexec_bits << 8);
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
#error unsupported CPU
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TBs could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
/* main execution loop */
int cpu_exec(CPUState *env1)
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
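    /* hostregs_helper.h is included three times: here (DECLARE_HOST_REGS)
       to declare storage for the host globals, below (SAVE_HOST_REGS) to
       save them, and at the end of this function to restore them. */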
    int ret, interrupt_request;
    TranslationBlock *tb;
    if (cpu_halted(env1) == EXCP_HALTED)
    cpu_single_env = env1;
    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
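    /* DF is kept as +1/-1 (the increment applied by string ops) rather
       than as the raw eflags bit 10. */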
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#error unsupported target CPU
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    if (setjmp(env->jmp_env) == 0) {
        env->current_tb = NULL;
        /* if an exception is pending, we execute it here */
        if (env->exception_index >= 0) {
            if (env->exception_index >= EXCP_INTERRUPT) {
                /* exit request from the cpu execution loop */
                ret = env->exception_index;
            } else if (env->user_mode_only) {
                /* if user mode only, we simulate a fake exception
                   which will be handled outside the cpu execution
#if defined(TARGET_I386)
                do_interrupt_user(env->exception_index,
                                  env->exception_is_int,
                                  env->exception_next_eip);
                /* successfully delivered */
                env->old_exception = -1;
                ret = env->exception_index;
#if defined(TARGET_I386)
                /* simulate a real cpu exception. On i386, it can
                   trigger new exceptions, but we do not handle
                   double or triple faults yet. */
                do_interrupt(env->exception_index,
                             env->exception_is_int,
                             env->exception_next_eip, 0);
                /* successfully delivered */
                env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
            env->exception_index = -1;
        if (kqemu_is_ok(env) && env->interrupt_request == 0) {
            env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
            ret = kqemu_cpu_exec(env);
            /* put eflags in CPU temporary format */
            CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
            DF = 1 - (2 * ((env->eflags >> 10) & 1));
            CC_OP = CC_OP_EFLAGS;
            env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                longjmp(env->jmp_env, 1);
            } else if (ret == 2) {
                /* softmmu execution needed */
                if (env->interrupt_request != 0) {
                    /* hardware interrupt will be executed just after */
                    /* otherwise, we restart */
                    longjmp(env->jmp_env, 1);
        next_tb = 0; /* force lookup of first TB */
            interrupt_request = env->interrupt_request;
            if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                && env->hflags & HF_GIF_MASK
                && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                    env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                    env->exception_index = EXCP_DEBUG;
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                if (interrupt_request & CPU_INTERRUPT_HALT) {
                    env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                    env->exception_index = EXCP_HLT;
#if defined(TARGET_I386)
                if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                    !(env->hflags & HF_SMM_MASK)) {
                    svm_check_intercept(SVM_EXIT_SMI);
                    env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                           !(env->hflags & HF_NMI_MASK)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                    env->hflags |= HF_NMI_MASK;
                    do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                           (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                           !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    svm_check_intercept(SVM_EXIT_INTR);
                    env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                    intno = cpu_get_pic_interrupt(env);
                    if (loglevel & CPU_LOG_TB_IN_ASM) {
                        fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                    do_interrupt(intno, 0, 0, 0, 1);
                    /* ensure that no TB jump will be modified as
                       the program flow was changed */
#if !defined(CONFIG_USER_ONLY)
                } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                           (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                    /* FIXME: this should respect TPR */
                    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                    svm_check_intercept(SVM_EXIT_VINTR);
                    intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                    if (loglevel & CPU_LOG_TB_IN_ASM)
                        fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                    do_interrupt(intno, 0, 0, -1, 1);
                    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
#elif defined(TARGET_PPC)
                if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                if (interrupt_request & CPU_INTERRUPT_HARD) {
                    ppc_hw_interrupt(env);
                    if (env->pending_interrupts == 0)
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#elif defined(TARGET_MIPS)
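                /* an interrupt is taken only if an enabled IP bit is
                   pending, global interrupts are on (IE set) and the CPU
                   is not in exception (EXL), error (ERL) or debug (DM)
                   mode */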
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                    (env->CP0_Status & (1 << CP0St_IE)) &&
                    !(env->CP0_Status & (1 << CP0St_EXL)) &&
                    !(env->CP0_Status & (1 << CP0St_ERL)) &&
                    !(env->hflags & MIPS_HFLAG_DM)) {
                    env->exception_index = EXCP_EXT_INTERRUPT;
#elif defined(TARGET_SPARC)
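                /* interrupt_index packs the trap type in bits 7:4 and
                   the priority level (PIL) in bits 3:0 */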
                if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                    int pil = env->interrupt_index & 15;
                    int type = env->interrupt_index & 0xf0;
                    if (((type == TT_EXTINT) &&
                         (pil == 15 || pil > env->psrpil)) ||
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        env->exception_index = env->interrupt_index;
                        env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                    //do_interrupt(0, 0, 0, 0, 0);
                    env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#elif defined(TARGET_ARM)
                if (interrupt_request & CPU_INTERRUPT_FIQ
                    && !(env->uncached_cpsr & CPSR_F)) {
                    env->exception_index = EXCP_FIQ;
                /* ARMv7-M interrupt return works by loading a magic value
                   into the PC.  On real hardware the load causes the
                   return to occur.  The qemu implementation performs the
                   jump normally, then does the exception return when the
                   CPU tries to execute code at the magic address.
                   This will cause the magic PC value to be pushed to
                   the stack if an interrupt occurred at the wrong time.
                   We avoid this by disabling interrupts when
                   pc contains a magic address.  */
                if (interrupt_request & CPU_INTERRUPT_HARD
                    && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                        || !(env->uncached_cpsr & CPSR_I))) {
                    env->exception_index = EXCP_IRQ;
#elif defined(TARGET_SH4)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_ALPHA)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_CRIS)
                if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_M68K)
                if (interrupt_request & CPU_INTERRUPT_HARD
                    && ((env->sr & SR_I) >> SR_I_SHIFT)
                       < env->pending_level) {
                    /* Real hardware gets the interrupt vector via an
                       IACK cycle at this point.  Current emulated
                       hardware doesn't rely on this, so we
                       provide/save the vector when the interrupt is
                    env->exception_index = env->pending_vector;
            /* Don't use the cached interrupt_request value,
               do_interrupt may have updated the EXITTB flag. */
            if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                /* ensure that no TB jump will be modified as
                   the program flow was changed */
            if (interrupt_request & CPU_INTERRUPT_EXIT) {
                env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                env->exception_index = EXCP_INTERRUPT;
            if ((loglevel & CPU_LOG_TB_CPU)) {
                /* restore flags in standard format */
#if defined(TARGET_I386)
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                cpu_m68k_flush_flags(env, env->cc_op);
                env->cc_op = CC_OP_FLAGS;
                env->sr = (env->sr & 0xffe0)
                          | env->cc_dest | (env->cc_x << 4);
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                cpu_dump_state(env, logfile, fprintf, 0);
#error unsupported target CPU
            if ((loglevel & CPU_LOG_EXEC)) {
                fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                        (long)tb->tc_ptr, tb->pc,
                        lookup_symbol(tb->pc));
            /* see if we can patch the calling TB. When the TB
               spans two pages, we cannot safely do a direct
                    (env->kqemu_enabled != 2) &&
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
            env->current_tb = tb;
            /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
            env = cpu_single_env;
#define env cpu_single_env
            next_tb = tcg_qemu_tb_exec(tc_ptr);
            env->current_tb = NULL;
            /* reset soft MMU for next block (it can currently
               only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
            if (kqemu_is_ok(env) &&
                (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
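    /* fold the lazily computed condition codes back into SR before
       leaving cpu_exec */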
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#error unsupported target CPU
    /* restore global registers */
#include "hostregs_helper.h"
    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
/* must only be called from the generated code as an exception can be
void tb_invalidate_page_range(target_ulong start, target_ulong end)
    /* XXX: cannot enable it yet because it triggers an MMU exception
       where NIP != read address on PowerPC */
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
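/* Helpers for user-mode emulation: they temporarily install the given
   CPUX86State as the current env, run the corresponding target helper,
   then restore the caller's env (saved_env pattern). */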
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
    CPUX86State *saved_env;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
        helper_load_seg(seg_reg, selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
    CPUX86State *saved_env;
    helper_fsave(ptr, data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
    CPUX86State *saved_env;
    helper_frstor(ptr, data32);
#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored */
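/* Returns 1 if the fault was handled here and 0 if the signal must be
   forwarded to the application; it does not return at all when a guest
   exception is raised instead. */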
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
           env->eip, env->cr[2], env->error_code);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    env->hflags |= HF_SOFTMMU_MASK;
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
           env->PC, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    do_raise_exception_err(env->exception_index, env->error_code);
    /* activate soft MMU for this block */
    cpu_resume_from_signal(env, puc);
    /* never comes here */
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    printf("PF exception: PC=0x%08lx error=0x%x %p\n",
           (unsigned long)env->pc, env->error_code, tb);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
    TranslationBlock *tb;
    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
        return 0; /* not an MMU fault */
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    /* never comes here */
#error unsupported target CPU
#if defined(__i386__)
#if defined(__APPLE__)
# include <sys/ucontext.h>
# define EIP_sig(context)     (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
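/* On the host, trap 0x0e is a page fault and bit 1 of the error code is
   set when the faulting access was a write. */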
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
#define REG_TRAPNO TRAPNO
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
#elif defined(__x86_64__)
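/* same page-fault decoding as the i386 handler above, using the 64-bit
   register names */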
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
#elif defined(__powerpc__)
/***********************************************************************
 * signal context platform-specific definitions
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context) /* Count register */
# define XER_sig(context)                 REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context) /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)               (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context) /* Link register */
# define CR_sig(context)                  REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of the PowerPC exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
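    /* DSISR bit 0x02000000 is set when the faulting access was a store
       (not valid for 0x400 instruction-access faults) */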
    if (DSISR_sig(uc) & 0x00800000)
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__alpha__)
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
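    /* bits 31:26 hold the Alpha opcode; the store opcodes set is_write */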
    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__sparc__)
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
    /* XXX: need kernel patch to get write flag faster */
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
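        /* instruction format 3 (bits 31:30 == 11) covers loads and
           stores; op3 in bits 24:19 selects the opcode, so stores can
           be recognized here */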
        switch ((insn >> 19) & 0x3f) {
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
#elif defined(__arm__)
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
#elif defined(__mc68000)
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
#elif defined(__ia64)
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
#elif defined(__s390__)
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__mips__)
int cpu_signal_handler(int host_signum, void *pinfo,
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    /* XXX: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
#elif defined(__hppa__)
int cpu_signal_handler(int host_signum, void *pinfo,
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             &uc->uc_sigmask, puc);
#error host CPU specific signal handler needed
#endif /* !defined(CONFIG_SOFTMMU) */