/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
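
/* NOTE: cpu_exec() below runs translated code inside
   setjmp(env->jmp_env), so this longjmp (and every other longjmp on
   jmp_env in this file) unwinds straight back to that setjmp with a
   nonzero return value, abandoning the current TB. Guest registers
   that may be cached in host globals must be flushed back into env
   first, which is what regs_to_env() does. */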
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
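
/* NOTE: the value returned by tcg_qemu_tb_exec() packs a
   TranslationBlock pointer with a code in its two low bits: 0 or 1
   name the jump slot through which the TB was left, and the
   (tb | 2) form tested above signals that the instruction counter
   expired, in which case only the PC has to be restored from the TB. */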
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
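
/* The TB lookup is two-level: tb_jmp_cache is a small direct-mapped
   cache indexed by a hash of the current virtual PC and is tried
   first; on a miss (or a cs_base/flags mismatch) tb_find_slow falls
   back to the hash chain keyed by the physical code address, so a TB
   stays valid whatever virtual mapping the guest uses for the page
   and can be invalidated when the physical page is written. */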
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        for (wp = env->watchpoints; wp != NULL; wp = wp->next)
            wp->flags &= ~BP_WATCHPOINT_HIT;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
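
/* NOTE: on i386 the condition codes are evaluated lazily: translated
   code only records the last operation and its operands in
   CC_OP/CC_SRC, and the flags are recomputed on demand. Setting
   CC_OP_EFLAGS as above means "CC_SRC holds the flag bits
   themselves", so helper_cc_compute_all() can fold them back into
   env->eflags later (see the end of cpu_exec). */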
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
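
            /* NOTE: the kqemu return codes checked above appear to
               be: 1 = a guest exception is now pending (longjmp back
               to deliver it), 2 = the accelerator hit something it
               cannot handle and the block must run under the soft
               MMU, anything else = resume the normal loop, restarting
               it unless a hardware interrupt is already pending. */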

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
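                    /* The SVM-aware gating above: HF2_GIF_MASK is the
                       global interrupt flag, which a hypervisor guest
                       can clear to block every interrupt source at
                       once. Within it, NMIs are blocked while
                       HF2_NMI_MASK is set (an NMI handler is running),
                       and maskable interrupts are taken either under
                       virtual interrupt masking (HF2_VINTR_MASK with
                       the host IF copy HF2_HIF_MASK) or under the
                       usual guest IF / interrupt-shadow
                       (HF_INHIBIT_IRQ_MASK) rules. */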
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
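
                /* NOTE: CPU_INTERRUPT_EXITTB only invalidates the
                   chained-TB assumption (next_tb = 0 above forces a
                   fresh lookup instead of a direct jump), while
                   CPU_INTERRUPT_EXIT aborts the whole loop through
                   cpu_loop_exit() so that cpu_exec() returns
                   EXCP_INTERRUPT to its caller. */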
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
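
                /* tb_add_jump() above patches jump slot (next_tb & 3)
                   of the TB we just left (whose pointer sits in the
                   upper bits of next_tb) to branch straight to
                   tb->tc_ptr. Once blocks are chained this way, the
                   while loop below can stay inside generated code
                   across many TBs without coming back here. */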
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
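
                /* A worked example of the refill above: with
                   icount_decr.u32 == 3 (three instructions of credit
                   left) and icount_extra == 0x20000, the total budget
                   becomes 0x20003. Since icount_decr.u16.low is only
                   16 bits, the decrementer is reloaded with 0xffff and
                   icount_extra keeps the remaining 0x10004 for the
                   next refill. */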
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
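
/* NOTE: in the real-mode/vm86 branch above the segment base is simply
   selector << 4: loading e.g. 0xb800 yields base 0xb8000 with the
   fixed 64K limit, so no descriptor table lookup (helper_load_seg) is
   needed outside protected mode. */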
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 && CONFIG_USER_ONLY */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
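
/* NOTE: the handle_cpu_signal() protocol, shared by every target
   variant below: return 0 if the fault does not belong to the
   emulator (the host signal is then delivered normally), return 1 if
   it was fully handled (e.g. page_unprotect re-enabled a page that
   was write-protected only to detect self-modifying code), and do not
   return at all when a guest exception has to be raised instead. */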
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
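
/* On x86 hosts, trap number 0xe is the page-fault vector and bit 1 of
   the page-fault error code is set when the faulting access was a
   write, hence the (ERROR >> 1) & 1 computation in both handlers
   above; for any other trap is_write is conservatively 0. */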
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)            REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context)  /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)          (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: case 0x0e: case 0x0f:            /* stw, stb, stq_u */
    case 0x24: case 0x25: case 0x26: case 0x27: /* stf, stg, sts, stt */
    case 0x2c: case 0x2d: case 0x2e: case 0x2f: /* stl, stq, stl_c, stq_c */
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: case 0x06:            /* stb, sth */
        case 0x04: case 0x07:            /* st, std */
        case 0x24: case 0x27: case 0x25: /* stf, stdf, stfsr */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL: case SIGFPE: case SIGSEGV: case SIGBUS: case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */