2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #define CPU_NO_GLOBAL_REGS
27 #if !defined(CONFIG_SOFTMMU)
39 #include <sys/ucontext.h>
43 #if defined(__sparc__) && !defined(HOST_SOLARIS)
44 // Work around ugly bugs in glibc that mangle global register contents
46 #define env cpu_single_env
49 int tb_invalidated_flag;
52 //#define DEBUG_SIGNAL
54 void cpu_loop_exit(void)
56 /* NOTE: the register at this point must be saved by hand because
57 longjmp restore them */
59 longjmp(env->jmp_env, 1);
62 /* exit the current TB from a signal handler. The host registers are
63 restored in a state compatible with the CPU emulator
65 void cpu_resume_from_signal(CPUState *env1, void *puc)
67 #if !defined(CONFIG_SOFTMMU)
69 struct ucontext *uc = puc;
70 #elif defined(__OpenBSD__)
71 struct sigcontext *uc = puc;
77 /* XXX: restore cpu registers saved in host registers */
79 #if !defined(CONFIG_SOFTMMU)
81 /* XXX: use siglongjmp ? */
83 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
84 #elif defined(__OpenBSD__)
85 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
89 longjmp(env->jmp_env, 1);
92 /* Execute the code without caching the generated code. An interpreter
93 could be used if available. */
94 static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
96 unsigned long next_tb;
99 /* Should never happen.
100 We only end up here when an existing TB is too long. */
101 if (max_cycles > CF_COUNT_MASK)
102 max_cycles = CF_COUNT_MASK;
104 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
106 env->current_tb = tb;
107 /* execute the generated code */
108 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
110 if ((next_tb & 3) == 2) {
111 /* Restore PC. This may happen if async event occurs before
112 the TB starts executing. */
113 CPU_PC_FROM_TB(env, tb);
115 tb_phys_invalidate(tb, -1);
119 static TranslationBlock *tb_find_slow(target_ulong pc,
120 target_ulong cs_base,
123 TranslationBlock *tb, **ptb1;
125 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
127 tb_invalidated_flag = 0;
129 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
131 /* find translated block using physical mappings */
132 phys_pc = get_phys_addr_code(env, pc);
133 phys_page1 = phys_pc & TARGET_PAGE_MASK;
135 h = tb_phys_hash_func(phys_pc);
136 ptb1 = &tb_phys_hash[h];
142 tb->page_addr[0] == phys_page1 &&
143 tb->cs_base == cs_base &&
144 tb->flags == flags) {
145 /* check next page if needed */
146 if (tb->page_addr[1] != -1) {
147 virt_page2 = (pc & TARGET_PAGE_MASK) +
149 phys_page2 = get_phys_addr_code(env, virt_page2);
150 if (tb->page_addr[1] == phys_page2)
156 ptb1 = &tb->phys_hash_next;
159 /* if no translated code available, then translate it now */
160 tb = tb_gen_code(env, pc, cs_base, flags, 0);
163 /* we add the TB in the virtual pc hash table */
164 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
168 static inline TranslationBlock *tb_find_fast(void)
170 TranslationBlock *tb;
171 target_ulong cs_base, pc;
174 /* we record a subset of the CPU state. It will
175 always be the same before a given translated block
177 #if defined(TARGET_I386)
179 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
180 cs_base = env->segs[R_CS].base;
181 pc = cs_base + env->eip;
182 #elif defined(TARGET_ARM)
183 flags = env->thumb | (env->vfp.vec_len << 1)
184 | (env->vfp.vec_stride << 4);
185 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
187 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
189 flags |= (env->condexec_bits << 8);
192 #elif defined(TARGET_SPARC)
193 #ifdef TARGET_SPARC64
194 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
195 flags = ((env->pstate & PS_AM) << 2)
196 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
197 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
199 // FPU enable . Supervisor
200 flags = (env->psref << 4) | env->psrs;
204 #elif defined(TARGET_PPC)
208 #elif defined(TARGET_MIPS)
209 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
211 pc = env->active_tc.PC;
212 #elif defined(TARGET_M68K)
213 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
214 | (env->sr & SR_S) /* Bit 13 */
215 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
218 #elif defined(TARGET_SH4)
219 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
220 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
221 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
222 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
225 #elif defined(TARGET_ALPHA)
229 #elif defined(TARGET_CRIS)
230 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
235 #error unsupported CPU
237 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
238 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
239 tb->flags != flags)) {
240 tb = tb_find_slow(pc, cs_base, flags);
245 /* main execution loop */
247 int cpu_exec(CPUState *env1)
249 #define DECLARE_HOST_REGS 1
250 #include "hostregs_helper.h"
251 int ret, interrupt_request;
252 TranslationBlock *tb;
254 unsigned long next_tb;
256 if (cpu_halted(env1) == EXCP_HALTED)
259 cpu_single_env = env1;
261 /* first we save global registers */
262 #define SAVE_HOST_REGS 1
263 #include "hostregs_helper.h"
267 #if defined(TARGET_I386)
268 /* put eflags in CPU temporary format */
269 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
270 DF = 1 - (2 * ((env->eflags >> 10) & 1));
271 CC_OP = CC_OP_EFLAGS;
272 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
273 #elif defined(TARGET_SPARC)
274 #elif defined(TARGET_M68K)
275 env->cc_op = CC_OP_FLAGS;
276 env->cc_dest = env->sr & 0xf;
277 env->cc_x = (env->sr >> 4) & 1;
278 #elif defined(TARGET_ALPHA)
279 #elif defined(TARGET_ARM)
280 #elif defined(TARGET_PPC)
281 #elif defined(TARGET_MIPS)
282 #elif defined(TARGET_SH4)
283 #elif defined(TARGET_CRIS)
286 #error unsupported target CPU
288 env->exception_index = -1;
290 /* prepare setjmp context for exception handling */
292 if (setjmp(env->jmp_env) == 0) {
293 env->current_tb = NULL;
294 /* if an exception is pending, we execute it here */
295 if (env->exception_index >= 0) {
296 if (env->exception_index >= EXCP_INTERRUPT) {
297 /* exit request from the cpu execution loop */
298 ret = env->exception_index;
300 } else if (env->user_mode_only) {
301 /* if user mode only, we simulate a fake exception
302 which will be handled outside the cpu execution
304 #if defined(TARGET_I386)
305 do_interrupt_user(env->exception_index,
306 env->exception_is_int,
308 env->exception_next_eip);
309 /* successfully delivered */
310 env->old_exception = -1;
312 ret = env->exception_index;
315 #if defined(TARGET_I386)
316 /* simulate a real cpu exception. On i386, it can
317 trigger new exceptions, but we do not handle
318 double or triple faults yet. */
319 do_interrupt(env->exception_index,
320 env->exception_is_int,
322 env->exception_next_eip, 0);
323 /* successfully delivered */
324 env->old_exception = -1;
325 #elif defined(TARGET_PPC)
327 #elif defined(TARGET_MIPS)
329 #elif defined(TARGET_SPARC)
331 #elif defined(TARGET_ARM)
333 #elif defined(TARGET_SH4)
335 #elif defined(TARGET_ALPHA)
337 #elif defined(TARGET_CRIS)
339 #elif defined(TARGET_M68K)
343 env->exception_index = -1;
346 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
348 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
349 ret = kqemu_cpu_exec(env);
350 /* put eflags in CPU temporary format */
351 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
352 DF = 1 - (2 * ((env->eflags >> 10) & 1));
353 CC_OP = CC_OP_EFLAGS;
354 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
357 longjmp(env->jmp_env, 1);
358 } else if (ret == 2) {
359 /* softmmu execution needed */
361 if (env->interrupt_request != 0) {
362 /* hardware interrupt will be executed just after */
364 /* otherwise, we restart */
365 longjmp(env->jmp_env, 1);
373 ret = kvm_cpu_exec(env);
374 if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
375 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
376 env->exception_index = EXCP_INTERRUPT;
378 } else if (env->halted) {
381 longjmp(env->jmp_env, 1);
384 next_tb = 0; /* force lookup of first TB */
386 interrupt_request = env->interrupt_request;
387 if (unlikely(interrupt_request)) {
388 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
389 /* Mask out external interrupts for this step. */
390 interrupt_request &= ~(CPU_INTERRUPT_HARD |
395 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
396 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
397 env->exception_index = EXCP_DEBUG;
400 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
401 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
402 if (interrupt_request & CPU_INTERRUPT_HALT) {
403 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
405 env->exception_index = EXCP_HLT;
409 #if defined(TARGET_I386)
410 if (env->hflags2 & HF2_GIF_MASK) {
411 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
412 !(env->hflags & HF_SMM_MASK)) {
413 svm_check_intercept(SVM_EXIT_SMI);
414 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
417 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
418 !(env->hflags2 & HF2_NMI_MASK)) {
419 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
420 env->hflags2 |= HF2_NMI_MASK;
421 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
423 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
424 (((env->hflags2 & HF2_VINTR_MASK) &&
425 (env->hflags2 & HF2_HIF_MASK)) ||
426 (!(env->hflags2 & HF2_VINTR_MASK) &&
427 (env->eflags & IF_MASK &&
428 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
430 svm_check_intercept(SVM_EXIT_INTR);
431 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
432 intno = cpu_get_pic_interrupt(env);
433 if (loglevel & CPU_LOG_TB_IN_ASM) {
434 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
436 do_interrupt(intno, 0, 0, 0, 1);
437 /* ensure that no TB jump will be modified as
438 the program flow was changed */
440 #if !defined(CONFIG_USER_ONLY)
441 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
442 (env->eflags & IF_MASK) &&
443 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
445 /* FIXME: this should respect TPR */
446 svm_check_intercept(SVM_EXIT_VINTR);
447 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
448 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
449 if (loglevel & CPU_LOG_TB_IN_ASM)
450 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
451 do_interrupt(intno, 0, 0, 0, 1);
456 #elif defined(TARGET_PPC)
458 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
462 if (interrupt_request & CPU_INTERRUPT_HARD) {
463 ppc_hw_interrupt(env);
464 if (env->pending_interrupts == 0)
465 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
468 #elif defined(TARGET_MIPS)
469 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
470 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
471 (env->CP0_Status & (1 << CP0St_IE)) &&
472 !(env->CP0_Status & (1 << CP0St_EXL)) &&
473 !(env->CP0_Status & (1 << CP0St_ERL)) &&
474 !(env->hflags & MIPS_HFLAG_DM)) {
476 env->exception_index = EXCP_EXT_INTERRUPT;
481 #elif defined(TARGET_SPARC)
482 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
484 int pil = env->interrupt_index & 15;
485 int type = env->interrupt_index & 0xf0;
487 if (((type == TT_EXTINT) &&
488 (pil == 15 || pil > env->psrpil)) ||
490 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
491 env->exception_index = env->interrupt_index;
493 env->interrupt_index = 0;
494 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
499 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
500 //do_interrupt(0, 0, 0, 0, 0);
501 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
503 #elif defined(TARGET_ARM)
504 if (interrupt_request & CPU_INTERRUPT_FIQ
505 && !(env->uncached_cpsr & CPSR_F)) {
506 env->exception_index = EXCP_FIQ;
510 /* ARMv7-M interrupt return works by loading a magic value
511 into the PC. On real hardware the load causes the
512 return to occur. The qemu implementation performs the
513 jump normally, then does the exception return when the
514 CPU tries to execute code at the magic address.
515 This will cause the magic PC value to be pushed to
516 the stack if an interrupt occured at the wrong time.
517 We avoid this by disabling interrupts when
518 pc contains a magic address. */
519 if (interrupt_request & CPU_INTERRUPT_HARD
520 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
521 || !(env->uncached_cpsr & CPSR_I))) {
522 env->exception_index = EXCP_IRQ;
526 #elif defined(TARGET_SH4)
527 if (interrupt_request & CPU_INTERRUPT_HARD) {
531 #elif defined(TARGET_ALPHA)
532 if (interrupt_request & CPU_INTERRUPT_HARD) {
536 #elif defined(TARGET_CRIS)
537 if (interrupt_request & CPU_INTERRUPT_HARD
538 && (env->pregs[PR_CCS] & I_FLAG)) {
539 env->exception_index = EXCP_IRQ;
543 if (interrupt_request & CPU_INTERRUPT_NMI
544 && (env->pregs[PR_CCS] & M_FLAG)) {
545 env->exception_index = EXCP_NMI;
549 #elif defined(TARGET_M68K)
550 if (interrupt_request & CPU_INTERRUPT_HARD
551 && ((env->sr & SR_I) >> SR_I_SHIFT)
552 < env->pending_level) {
553 /* Real hardware gets the interrupt vector via an
554 IACK cycle at this point. Current emulated
555 hardware doesn't rely on this, so we
556 provide/save the vector when the interrupt is
558 env->exception_index = env->pending_vector;
563 /* Don't use the cached interupt_request value,
564 do_interrupt may have updated the EXITTB flag. */
565 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
566 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
567 /* ensure that no TB jump will be modified as
568 the program flow was changed */
571 if (interrupt_request & CPU_INTERRUPT_EXIT) {
572 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
573 env->exception_index = EXCP_INTERRUPT;
578 if ((loglevel & CPU_LOG_TB_CPU)) {
579 /* restore flags in standard format */
581 #if defined(TARGET_I386)
582 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
583 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
584 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
585 #elif defined(TARGET_ARM)
586 cpu_dump_state(env, logfile, fprintf, 0);
587 #elif defined(TARGET_SPARC)
588 cpu_dump_state(env, logfile, fprintf, 0);
589 #elif defined(TARGET_PPC)
590 cpu_dump_state(env, logfile, fprintf, 0);
591 #elif defined(TARGET_M68K)
592 cpu_m68k_flush_flags(env, env->cc_op);
593 env->cc_op = CC_OP_FLAGS;
594 env->sr = (env->sr & 0xffe0)
595 | env->cc_dest | (env->cc_x << 4);
596 cpu_dump_state(env, logfile, fprintf, 0);
597 #elif defined(TARGET_MIPS)
598 cpu_dump_state(env, logfile, fprintf, 0);
599 #elif defined(TARGET_SH4)
600 cpu_dump_state(env, logfile, fprintf, 0);
601 #elif defined(TARGET_ALPHA)
602 cpu_dump_state(env, logfile, fprintf, 0);
603 #elif defined(TARGET_CRIS)
604 cpu_dump_state(env, logfile, fprintf, 0);
606 #error unsupported target CPU
612 /* Note: we do it here to avoid a gcc bug on Mac OS X when
613 doing it in tb_find_slow */
614 if (tb_invalidated_flag) {
615 /* as some TB could have been invalidated because
616 of memory exceptions while generating the code, we
617 must recompute the hash index here */
619 tb_invalidated_flag = 0;
622 if ((loglevel & CPU_LOG_EXEC)) {
623 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
624 (long)tb->tc_ptr, tb->pc,
625 lookup_symbol(tb->pc));
628 /* see if we can patch the calling TB. When the TB
629 spans two pages, we cannot safely do a direct
634 (env->kqemu_enabled != 2) &&
636 tb->page_addr[1] == -1) {
637 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
640 spin_unlock(&tb_lock);
641 env->current_tb = tb;
643 /* cpu_interrupt might be called while translating the
644 TB, but before it is linked into a potentially
645 infinite loop and becomes env->current_tb. Avoid
646 starting execution if there is a pending interrupt. */
647 if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
648 env->current_tb = NULL;
650 while (env->current_tb) {
652 /* execute the generated code */
653 #if defined(__sparc__) && !defined(HOST_SOLARIS)
655 env = cpu_single_env;
656 #define env cpu_single_env
658 next_tb = tcg_qemu_tb_exec(tc_ptr);
659 env->current_tb = NULL;
660 if ((next_tb & 3) == 2) {
661 /* Instruction counter expired. */
663 tb = (TranslationBlock *)(long)(next_tb & ~3);
665 CPU_PC_FROM_TB(env, tb);
666 insns_left = env->icount_decr.u32;
667 if (env->icount_extra && insns_left >= 0) {
668 /* Refill decrementer and continue execution. */
669 env->icount_extra += insns_left;
670 if (env->icount_extra > 0xffff) {
673 insns_left = env->icount_extra;
675 env->icount_extra -= insns_left;
676 env->icount_decr.u16.low = insns_left;
678 if (insns_left > 0) {
679 /* Execute remaining instructions. */
680 cpu_exec_nocache(insns_left, tb);
682 env->exception_index = EXCP_INTERRUPT;
688 /* reset soft MMU for next block (it can currently
689 only be set by a memory fault) */
690 #if defined(USE_KQEMU)
691 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
692 if (kqemu_is_ok(env) &&
693 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
704 #if defined(TARGET_I386)
705 /* restore flags in standard format */
706 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
707 #elif defined(TARGET_ARM)
708 /* XXX: Save/restore host fpu exception state?. */
709 #elif defined(TARGET_SPARC)
710 #elif defined(TARGET_PPC)
711 #elif defined(TARGET_M68K)
712 cpu_m68k_flush_flags(env, env->cc_op);
713 env->cc_op = CC_OP_FLAGS;
714 env->sr = (env->sr & 0xffe0)
715 | env->cc_dest | (env->cc_x << 4);
716 #elif defined(TARGET_MIPS)
717 #elif defined(TARGET_SH4)
718 #elif defined(TARGET_ALPHA)
719 #elif defined(TARGET_CRIS)
722 #error unsupported target CPU
725 /* restore global registers */
726 #include "hostregs_helper.h"
728 /* fail safe : never use cpu_single_env outside cpu_exec() */
729 cpu_single_env = NULL;
733 /* must only be called from the generated code as an exception can be
735 void tb_invalidate_page_range(target_ulong start, target_ulong end)
737 /* XXX: cannot enable it yet because it yields to MMU exception
738 where NIP != read address on PowerPC */
740 target_ulong phys_addr;
741 phys_addr = get_phys_addr_code(env, start);
742 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
746 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
748 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
750 CPUX86State *saved_env;
754 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
756 cpu_x86_load_seg_cache(env, seg_reg, selector,
757 (selector << 4), 0xffff, 0);
759 helper_load_seg(seg_reg, selector);
764 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
766 CPUX86State *saved_env;
771 helper_fsave(ptr, data32);
776 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
778 CPUX86State *saved_env;
783 helper_frstor(ptr, data32);
788 #endif /* TARGET_I386 */
790 #if !defined(CONFIG_SOFTMMU)
792 #if defined(TARGET_I386)
794 /* 'pc' is the host PC at which the exception was raised. 'address' is
795 the effective address of the memory exception. 'is_write' is 1 if a
796 write caused the exception and otherwise 0'. 'old_set' is the
797 signal set which should be restored */
798 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
799 int is_write, sigset_t *old_set,
802 TranslationBlock *tb;
806 env = cpu_single_env; /* XXX: find a correct solution for multithread */
807 #if defined(DEBUG_SIGNAL)
808 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
809 pc, address, is_write, *(unsigned long *)old_set);
811 /* XXX: locking issue */
812 if (is_write && page_unprotect(h2g(address), pc, puc)) {
816 /* see if it is an MMU fault */
817 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
819 return 0; /* not an MMU fault */
821 return 1; /* the MMU fault was handled without causing real CPU fault */
822 /* now we have a real cpu fault */
825 /* the PC is inside the translated code. It means that we have
826 a virtual CPU fault */
827 cpu_restore_state(tb, env, pc, puc);
831 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
832 env->eip, env->cr[2], env->error_code);
834 /* we restore the process signal mask as the sigreturn should
835 do it (XXX: use sigsetjmp) */
836 sigprocmask(SIG_SETMASK, old_set, NULL);
837 raise_exception_err(env->exception_index, env->error_code);
839 /* activate soft MMU for this block */
840 env->hflags |= HF_SOFTMMU_MASK;
841 cpu_resume_from_signal(env, puc);
843 /* never comes here */
847 #elif defined(TARGET_ARM)
848 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
849 int is_write, sigset_t *old_set,
852 TranslationBlock *tb;
856 env = cpu_single_env; /* XXX: find a correct solution for multithread */
857 #if defined(DEBUG_SIGNAL)
858 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
859 pc, address, is_write, *(unsigned long *)old_set);
861 /* XXX: locking issue */
862 if (is_write && page_unprotect(h2g(address), pc, puc)) {
865 /* see if it is an MMU fault */
866 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
868 return 0; /* not an MMU fault */
870 return 1; /* the MMU fault was handled without causing real CPU fault */
871 /* now we have a real cpu fault */
874 /* the PC is inside the translated code. It means that we have
875 a virtual CPU fault */
876 cpu_restore_state(tb, env, pc, puc);
878 /* we restore the process signal mask as the sigreturn should
879 do it (XXX: use sigsetjmp) */
880 sigprocmask(SIG_SETMASK, old_set, NULL);
882 /* never comes here */
885 #elif defined(TARGET_SPARC)
886 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
887 int is_write, sigset_t *old_set,
890 TranslationBlock *tb;
894 env = cpu_single_env; /* XXX: find a correct solution for multithread */
895 #if defined(DEBUG_SIGNAL)
896 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
897 pc, address, is_write, *(unsigned long *)old_set);
899 /* XXX: locking issue */
900 if (is_write && page_unprotect(h2g(address), pc, puc)) {
903 /* see if it is an MMU fault */
904 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
906 return 0; /* not an MMU fault */
908 return 1; /* the MMU fault was handled without causing real CPU fault */
909 /* now we have a real cpu fault */
912 /* the PC is inside the translated code. It means that we have
913 a virtual CPU fault */
914 cpu_restore_state(tb, env, pc, puc);
916 /* we restore the process signal mask as the sigreturn should
917 do it (XXX: use sigsetjmp) */
918 sigprocmask(SIG_SETMASK, old_set, NULL);
920 /* never comes here */
923 #elif defined (TARGET_PPC)
924 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
925 int is_write, sigset_t *old_set,
928 TranslationBlock *tb;
932 env = cpu_single_env; /* XXX: find a correct solution for multithread */
933 #if defined(DEBUG_SIGNAL)
934 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
935 pc, address, is_write, *(unsigned long *)old_set);
937 /* XXX: locking issue */
938 if (is_write && page_unprotect(h2g(address), pc, puc)) {
942 /* see if it is an MMU fault */
943 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
945 return 0; /* not an MMU fault */
947 return 1; /* the MMU fault was handled without causing real CPU fault */
949 /* now we have a real cpu fault */
952 /* the PC is inside the translated code. It means that we have
953 a virtual CPU fault */
954 cpu_restore_state(tb, env, pc, puc);
958 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
959 env->nip, env->error_code, tb);
961 /* we restore the process signal mask as the sigreturn should
962 do it (XXX: use sigsetjmp) */
963 sigprocmask(SIG_SETMASK, old_set, NULL);
964 do_raise_exception_err(env->exception_index, env->error_code);
966 /* activate soft MMU for this block */
967 cpu_resume_from_signal(env, puc);
969 /* never comes here */
973 #elif defined(TARGET_M68K)
974 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
975 int is_write, sigset_t *old_set,
978 TranslationBlock *tb;
982 env = cpu_single_env; /* XXX: find a correct solution for multithread */
983 #if defined(DEBUG_SIGNAL)
984 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
985 pc, address, is_write, *(unsigned long *)old_set);
987 /* XXX: locking issue */
988 if (is_write && page_unprotect(address, pc, puc)) {
991 /* see if it is an MMU fault */
992 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
994 return 0; /* not an MMU fault */
996 return 1; /* the MMU fault was handled without causing real CPU fault */
997 /* now we have a real cpu fault */
1000 /* the PC is inside the translated code. It means that we have
1001 a virtual CPU fault */
1002 cpu_restore_state(tb, env, pc, puc);
1004 /* we restore the process signal mask as the sigreturn should
1005 do it (XXX: use sigsetjmp) */
1006 sigprocmask(SIG_SETMASK, old_set, NULL);
1008 /* never comes here */
1012 #elif defined (TARGET_MIPS)
1013 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1014 int is_write, sigset_t *old_set,
1017 TranslationBlock *tb;
1021 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1022 #if defined(DEBUG_SIGNAL)
1023 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1024 pc, address, is_write, *(unsigned long *)old_set);
1026 /* XXX: locking issue */
1027 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1031 /* see if it is an MMU fault */
1032 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1034 return 0; /* not an MMU fault */
1036 return 1; /* the MMU fault was handled without causing real CPU fault */
1038 /* now we have a real cpu fault */
1039 tb = tb_find_pc(pc);
1041 /* the PC is inside the translated code. It means that we have
1042 a virtual CPU fault */
1043 cpu_restore_state(tb, env, pc, puc);
1047 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1048 env->PC, env->error_code, tb);
1050 /* we restore the process signal mask as the sigreturn should
1051 do it (XXX: use sigsetjmp) */
1052 sigprocmask(SIG_SETMASK, old_set, NULL);
1053 do_raise_exception_err(env->exception_index, env->error_code);
1055 /* activate soft MMU for this block */
1056 cpu_resume_from_signal(env, puc);
1058 /* never comes here */
1062 #elif defined (TARGET_SH4)
1063 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1064 int is_write, sigset_t *old_set,
1067 TranslationBlock *tb;
1071 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1072 #if defined(DEBUG_SIGNAL)
1073 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1074 pc, address, is_write, *(unsigned long *)old_set);
1076 /* XXX: locking issue */
1077 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1081 /* see if it is an MMU fault */
1082 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1084 return 0; /* not an MMU fault */
1086 return 1; /* the MMU fault was handled without causing real CPU fault */
1088 /* now we have a real cpu fault */
1089 tb = tb_find_pc(pc);
1091 /* the PC is inside the translated code. It means that we have
1092 a virtual CPU fault */
1093 cpu_restore_state(tb, env, pc, puc);
1096 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1097 env->nip, env->error_code, tb);
1099 /* we restore the process signal mask as the sigreturn should
1100 do it (XXX: use sigsetjmp) */
1101 sigprocmask(SIG_SETMASK, old_set, NULL);
1103 /* never comes here */
1107 #elif defined (TARGET_ALPHA)
1108 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1109 int is_write, sigset_t *old_set,
1112 TranslationBlock *tb;
1116 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1117 #if defined(DEBUG_SIGNAL)
1118 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1119 pc, address, is_write, *(unsigned long *)old_set);
1121 /* XXX: locking issue */
1122 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1126 /* see if it is an MMU fault */
1127 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1129 return 0; /* not an MMU fault */
1131 return 1; /* the MMU fault was handled without causing real CPU fault */
1133 /* now we have a real cpu fault */
1134 tb = tb_find_pc(pc);
1136 /* the PC is inside the translated code. It means that we have
1137 a virtual CPU fault */
1138 cpu_restore_state(tb, env, pc, puc);
1141 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1142 env->nip, env->error_code, tb);
1144 /* we restore the process signal mask as the sigreturn should
1145 do it (XXX: use sigsetjmp) */
1146 sigprocmask(SIG_SETMASK, old_set, NULL);
1148 /* never comes here */
1151 #elif defined (TARGET_CRIS)
1152 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1153 int is_write, sigset_t *old_set,
1156 TranslationBlock *tb;
1160 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1161 #if defined(DEBUG_SIGNAL)
1162 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1163 pc, address, is_write, *(unsigned long *)old_set);
1165 /* XXX: locking issue */
1166 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1170 /* see if it is an MMU fault */
1171 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1173 return 0; /* not an MMU fault */
1175 return 1; /* the MMU fault was handled without causing real CPU fault */
1177 /* now we have a real cpu fault */
1178 tb = tb_find_pc(pc);
1180 /* the PC is inside the translated code. It means that we have
1181 a virtual CPU fault */
1182 cpu_restore_state(tb, env, pc, puc);
1184 /* we restore the process signal mask as the sigreturn should
1185 do it (XXX: use sigsetjmp) */
1186 sigprocmask(SIG_SETMASK, old_set, NULL);
1188 /* never comes here */
1193 #error unsupported target CPU
1196 #if defined(__i386__)
1198 #if defined(__APPLE__)
1199 # include <sys/ucontext.h>
1201 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1202 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1203 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1205 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1206 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1207 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1210 int cpu_signal_handler(int host_signum, void *pinfo,
1213 siginfo_t *info = pinfo;
1214 struct ucontext *uc = puc;
1222 #define REG_TRAPNO TRAPNO
1225 trapno = TRAP_sig(uc);
1226 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1228 (ERROR_sig(uc) >> 1) & 1 : 0,
1229 &uc->uc_sigmask, puc);
1232 #elif defined(__x86_64__)
1234 int cpu_signal_handler(int host_signum, void *pinfo,
1237 siginfo_t *info = pinfo;
1238 struct ucontext *uc = puc;
1241 pc = uc->uc_mcontext.gregs[REG_RIP];
1242 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1243 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1244 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1245 &uc->uc_sigmask, puc);
1248 #elif defined(__powerpc__)
1250 /***********************************************************************
1251 * signal context platform-specific definitions
1255 /* All Registers access - only for local access */
1256 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1257 /* Gpr Registers access */
1258 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1259 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1260 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1261 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1262 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1263 # define LR_sig(context) REG_sig(link, context) /* Link register */
1264 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1265 /* Float Registers access */
1266 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1267 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1268 /* Exception Registers access */
1269 # define DAR_sig(context) REG_sig(dar, context)
1270 # define DSISR_sig(context) REG_sig(dsisr, context)
1271 # define TRAP_sig(context) REG_sig(trap, context)
1275 # include <sys/ucontext.h>
1276 typedef struct ucontext SIGCONTEXT;
1277 /* All Registers access - only for local access */
1278 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1279 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1280 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1281 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1282 /* Gpr Registers access */
1283 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1284 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1285 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1286 # define CTR_sig(context) REG_sig(ctr, context)
1287 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1288 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1289 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1290 /* Float Registers access */
1291 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1292 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1293 /* Exception Registers access */
1294 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1295 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1296 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1297 #endif /* __APPLE__ */
1299 int cpu_signal_handler(int host_signum, void *pinfo,
1302 siginfo_t *info = pinfo;
1303 struct ucontext *uc = puc;
1311 if (DSISR_sig(uc) & 0x00800000)
1314 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1317 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1318 is_write, &uc->uc_sigmask, puc);
1321 #elif defined(__alpha__)
1323 int cpu_signal_handler(int host_signum, void *pinfo,
1326 siginfo_t *info = pinfo;
1327 struct ucontext *uc = puc;
1328 uint32_t *pc = uc->uc_mcontext.sc_pc;
1329 uint32_t insn = *pc;
1332 /* XXX: need kernel patch to get write flag faster */
1333 switch (insn >> 26) {
1348 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1349 is_write, &uc->uc_sigmask, puc);
1351 #elif defined(__sparc__)
1353 int cpu_signal_handler(int host_signum, void *pinfo,
1356 siginfo_t *info = pinfo;
1359 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1360 uint32_t *regs = (uint32_t *)(info + 1);
1361 void *sigmask = (regs + 20);
1362 /* XXX: is there a standard glibc define ? */
1363 unsigned long pc = regs[1];
1366 struct sigcontext *sc = puc;
1367 unsigned long pc = sc->sigc_regs.tpc;
1368 void *sigmask = (void *)sc->sigc_mask;
1369 #elif defined(__OpenBSD__)
1370 struct sigcontext *uc = puc;
1371 unsigned long pc = uc->sc_pc;
1372 void *sigmask = (void *)(long)uc->sc_mask;
1376 /* XXX: need kernel patch to get write flag faster */
1378 insn = *(uint32_t *)pc;
1379 if ((insn >> 30) == 3) {
1380 switch((insn >> 19) & 0x3f) {
1392 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1393 is_write, sigmask, NULL);
1396 #elif defined(__arm__)
1398 int cpu_signal_handler(int host_signum, void *pinfo,
1401 siginfo_t *info = pinfo;
1402 struct ucontext *uc = puc;
1406 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1407 pc = uc->uc_mcontext.gregs[R15];
1409 pc = uc->uc_mcontext.arm_pc;
1411 /* XXX: compute is_write */
1413 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1415 &uc->uc_sigmask, puc);
1418 #elif defined(__mc68000)
1420 int cpu_signal_handler(int host_signum, void *pinfo,
1423 siginfo_t *info = pinfo;
1424 struct ucontext *uc = puc;
1428 pc = uc->uc_mcontext.gregs[16];
1429 /* XXX: compute is_write */
1431 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1433 &uc->uc_sigmask, puc);
1436 #elif defined(__ia64)
1439 /* This ought to be in <bits/siginfo.h>... */
1440 # define __ISR_VALID 1
1443 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1445 siginfo_t *info = pinfo;
1446 struct ucontext *uc = puc;
1450 ip = uc->uc_mcontext.sc_ip;
1451 switch (host_signum) {
1457 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1458 /* ISR.W (write-access) is bit 33: */
1459 is_write = (info->si_isr >> 33) & 1;
1465 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1467 &uc->uc_sigmask, puc);
1470 #elif defined(__s390__)
1472 int cpu_signal_handler(int host_signum, void *pinfo,
1475 siginfo_t *info = pinfo;
1476 struct ucontext *uc = puc;
1480 pc = uc->uc_mcontext.psw.addr;
1481 /* XXX: compute is_write */
1483 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1484 is_write, &uc->uc_sigmask, puc);
1487 #elif defined(__mips__)
1489 int cpu_signal_handler(int host_signum, void *pinfo,
1492 siginfo_t *info = pinfo;
1493 struct ucontext *uc = puc;
1494 greg_t pc = uc->uc_mcontext.pc;
1497 /* XXX: compute is_write */
1499 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1500 is_write, &uc->uc_sigmask, puc);
1503 #elif defined(__hppa__)
1505 int cpu_signal_handler(int host_signum, void *pinfo,
1508 struct siginfo *info = pinfo;
1509 struct ucontext *uc = puc;
1513 pc = uc->uc_mcontext.sc_iaoq[0];
1514 /* FIXME: compute is_write */
1516 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1518 &uc->uc_sigmask, puc);
1523 #error host CPU specific signal handler needed
1527 #endif /* !defined(CONFIG_SOFTMMU) */