 * i386 emulator main execution loop
 * Copyright (c) 2003-2005 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#define CPU_NO_GLOBAL_REGS
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#define env cpu_single_env
int tb_invalidated_flag;
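/* tb_invalidated_flag is set when TBs are invalidated while code is being
   generated (e.g. because of memory exceptions); cpu_exec() checks it so
   that a stale next_tb is not reused for block chaining. */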
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
/* NOTE: the registers at this point must be saved by hand because
longjmp restores them */
longjmp(env->jmp_env, 1);
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
#if !defined(CONFIG_SOFTMMU)
struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
struct sigcontext *uc = puc;
/* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
/* XXX: use siglongjmp ? */
sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
longjmp(env->jmp_env, 1);
/* Execute the code without caching the generated code. An interpreter
could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
unsigned long next_tb;
TranslationBlock *tb;
/* Should never happen.
We only end up here when an existing TB is too long. */
if (max_cycles > CF_COUNT_MASK)
max_cycles = CF_COUNT_MASK;
tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
env->current_tb = tb;
/* execute the generated code */
next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
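/* next_tb encodes the TB that stopped execution in its upper bits and a
   small tag in its two low bits; a tag of 2 means the instruction count
   expired, which is the case handled just below (the same convention is
   used by the main loop in cpu_exec()). */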
if ((next_tb & 3) == 2) {
/* Restore PC. This may happen if an async event occurs before
the TB starts executing. */
CPU_PC_FROM_TB(env, tb);
tb_phys_invalidate(tb, -1);
static TranslationBlock *tb_find_slow(target_ulong pc,
target_ulong cs_base,
TranslationBlock *tb, **ptb1;
target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
tb_invalidated_flag = 0;
regs_to_env(); /* XXX: do it just before cpu_gen_code() */
/* find translated block using physical mappings */
phys_pc = get_phys_addr_code(env, pc);
phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_phys_hash_func(phys_pc);
ptb1 = &tb_phys_hash[h];
tb->page_addr[0] == phys_page1 &&
tb->cs_base == cs_base &&
tb->flags == flags) {
/* check next page if needed */
if (tb->page_addr[1] != -1) {
virt_page2 = (pc & TARGET_PAGE_MASK) +
phys_page2 = get_phys_addr_code(env, virt_page2);
if (tb->page_addr[1] == phys_page2)
ptb1 = &tb->phys_hash_next;
/* if no translated code available, then translate it now */
tb = tb_gen_code(env, pc, cs_base, flags, 0);
/* we add the TB in the virtual pc hash table */
env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
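/* tb_find_fast() first probes the virtual-PC indexed tb_jmp_cache filled
   above; only on a miss, or when flags/cs_base no longer match, does it
   fall back to the physical hash lookup done by tb_find_slow(). */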
static inline TranslationBlock *tb_find_fast(void)
TranslationBlock *tb;
target_ulong cs_base, pc;
/* we record a subset of the CPU state. It will
always be the same before a given translated block is executed. */
#if defined(TARGET_I386)
flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
cs_base = env->segs[R_CS].base;
pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
flags = env->thumb | (env->vfp.vec_len << 1)
| (env->vfp.vec_stride << 4);
if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
flags |= (env->condexec_bits << 8);
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
// AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
flags = ((env->pstate & PS_AM) << 2)
| (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
| (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
// FPU enable . Supervisor
flags = (env->psref << 4) | env->psrs;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
| (env->sr & SR_S) /* Bit 13 */
| ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
#elif defined(TARGET_SH4)
flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
| DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0-3 */
| (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
| (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
#error unsupported CPU
tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) {
tb = tb_find_slow(pc, cs_base, flags);
/* main execution loop */
int cpu_exec(CPUState *env1)
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
int ret, interrupt_request;
TranslationBlock *tb;
unsigned long next_tb;
if (cpu_halted(env1) == EXCP_HALTED)
cpu_single_env = env1;
/* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_I386)
/* put eflags in CPU temporary format */
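/* The condition codes are kept lazily in CC_SRC/CC_OP while translated
   code runs; DF is stored as the +1/-1 string-operation increment derived
   from eflags bit 10, and those bits are then cleared from env->eflags. */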
CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
DF = 1 - (2 * ((env->eflags >> 10) & 1));
CC_OP = CC_OP_EFLAGS;
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
env->cc_op = CC_OP_FLAGS;
env->cc_dest = env->sr & 0xf;
env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#error unsupported target CPU
env->exception_index = -1;
/* prepare setjmp context for exception handling */
if (setjmp(env->jmp_env) == 0) {
env->current_tb = NULL;
/* if an exception is pending, we execute it here */
if (env->exception_index >= 0) {
if (env->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
ret = env->exception_index;
} else if (env->user_mode_only) {
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution loop */
#if defined(TARGET_I386)
do_interrupt_user(env->exception_index,
env->exception_is_int,
env->exception_next_eip);
/* successfully delivered */
env->old_exception = -1;
ret = env->exception_index;
#if defined(TARGET_I386)
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
do_interrupt(env->exception_index,
env->exception_is_int,
env->exception_next_eip, 0);
/* successfully delivered */
env->old_exception = -1;
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_ARM)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_M68K)
env->exception_index = -1;
if (kqemu_is_ok(env) && env->interrupt_request == 0) {
env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
ret = kqemu_cpu_exec(env);
/* put eflags in CPU temporary format */
CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
DF = 1 - (2 * ((env->eflags >> 10) & 1));
CC_OP = CC_OP_EFLAGS;
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
longjmp(env->jmp_env, 1);
} else if (ret == 2) {
/* softmmu execution needed */
if (env->interrupt_request != 0) {
/* hardware interrupt will be executed just after */
/* otherwise, we restart */
longjmp(env->jmp_env, 1);
ret = kvm_cpu_exec(env);
if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
env->exception_index = EXCP_INTERRUPT;
} else if (env->halted) {
longjmp(env->jmp_env, 1);
next_tb = 0; /* force lookup of first TB */
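/* next_tb carries the previously executed TB with its jump slot index in
   the two low bits; it is used further down to chain TBs directly.  Zero
   disables chaining for the first block of this iteration. */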
interrupt_request = env->interrupt_request;
if (unlikely(interrupt_request) &&
likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
env->exception_index = EXCP_DEBUG;
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
if (interrupt_request & CPU_INTERRUPT_HALT) {
env->interrupt_request &= ~CPU_INTERRUPT_HALT;
env->exception_index = EXCP_HLT;
#if defined(TARGET_I386)
if (env->hflags2 & HF2_GIF_MASK) {
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
!(env->hflags & HF_SMM_MASK)) {
svm_check_intercept(SVM_EXIT_SMI);
env->interrupt_request &= ~CPU_INTERRUPT_SMI;
} else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
!(env->hflags2 & HF2_NMI_MASK)) {
env->interrupt_request &= ~CPU_INTERRUPT_NMI;
env->hflags2 |= HF2_NMI_MASK;
do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
(((env->hflags2 & HF2_VINTR_MASK) &&
(env->hflags2 & HF2_HIF_MASK)) ||
(!(env->hflags2 & HF2_VINTR_MASK) &&
(env->eflags & IF_MASK &&
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
svm_check_intercept(SVM_EXIT_INTR);
env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
do_interrupt(intno, 0, 0, 0, 1);
/* ensure that no TB jump will be modified as
the program flow was changed */
#if !defined(CONFIG_USER_ONLY)
} else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
(env->eflags & IF_MASK) &&
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
/* FIXME: this should respect TPR */
svm_check_intercept(SVM_EXIT_VINTR);
env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
if (loglevel & CPU_LOG_TB_IN_ASM)
fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt(intno, 0, 0, 0, 1);
#elif defined(TARGET_PPC)
if ((interrupt_request & CPU_INTERRUPT_RESET)) {
if (interrupt_request & CPU_INTERRUPT_HARD) {
ppc_hw_interrupt(env);
if (env->pending_interrupts == 0)
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#elif defined(TARGET_MIPS)
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
(env->CP0_Status & (1 << CP0St_IE)) &&
!(env->CP0_Status & (1 << CP0St_EXL)) &&
!(env->CP0_Status & (1 << CP0St_ERL)) &&
!(env->hflags & MIPS_HFLAG_DM)) {
env->exception_index = EXCP_EXT_INTERRUPT;
#elif defined(TARGET_SPARC)
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
int pil = env->interrupt_index & 15;
int type = env->interrupt_index & 0xf0;
if (((type == TT_EXTINT) &&
(pil == 15 || pil > env->psrpil)) ||
env->interrupt_request &= ~CPU_INTERRUPT_HARD;
env->exception_index = env->interrupt_index;
env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
} else if (interrupt_request & CPU_INTERRUPT_TIMER) {
//do_interrupt(0, 0, 0, 0, 0);
env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#elif defined(TARGET_ARM)
if (interrupt_request & CPU_INTERRUPT_FIQ
&& !(env->uncached_cpsr & CPSR_F)) {
env->exception_index = EXCP_FIQ;
/* ARMv7-M interrupt return works by loading a magic value
into the PC. On real hardware the load causes the
return to occur. The qemu implementation performs the
jump normally, then does the exception return when the
CPU tries to execute code at the magic address.
This will cause the magic PC value to be pushed to
the stack if an interrupt occurred at the wrong time.
We avoid this by disabling interrupts when
pc contains a magic address. */
if (interrupt_request & CPU_INTERRUPT_HARD
&& ((IS_M(env) && env->regs[15] < 0xfffffff0)
|| !(env->uncached_cpsr & CPSR_I))) {
env->exception_index = EXCP_IRQ;
#elif defined(TARGET_SH4)
if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_ALPHA)
if (interrupt_request & CPU_INTERRUPT_HARD) {
#elif defined(TARGET_CRIS)
if (interrupt_request & CPU_INTERRUPT_HARD
&& (env->pregs[PR_CCS] & I_FLAG)) {
env->exception_index = EXCP_IRQ;
if (interrupt_request & CPU_INTERRUPT_NMI
&& (env->pregs[PR_CCS] & M_FLAG)) {
env->exception_index = EXCP_NMI;
#elif defined(TARGET_M68K)
if (interrupt_request & CPU_INTERRUPT_HARD
&& ((env->sr & SR_I) >> SR_I_SHIFT)
< env->pending_level) {
/* Real hardware gets the interrupt vector via an
IACK cycle at this point. Current emulated
hardware doesn't rely on this, so we
provide/save the vector when the interrupt is first signalled. */
env->exception_index = env->pending_vector;
/* Don't use the cached interrupt_request value,
do_interrupt may have updated the EXITTB flag. */
if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
the program flow was changed */
if (interrupt_request & CPU_INTERRUPT_EXIT) {
env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
env->exception_index = EXCP_INTERRUPT;
if ((loglevel & CPU_LOG_TB_CPU)) {
/* restore flags in standard format */
#if defined(TARGET_I386)
env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
cpu_m68k_flush_flags(env, env->cc_op);
env->cc_op = CC_OP_FLAGS;
env->sr = (env->sr & 0xffe0)
| env->cc_dest | (env->cc_x << 4);
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
cpu_dump_state(env, logfile, fprintf, 0);
#error unsupported target CPU
/* Note: we do it here to avoid a gcc bug on Mac OS X when
doing it in tb_find_slow */
if (tb_invalidated_flag) {
/* as some TB could have been invalidated because
of memory exceptions while generating the code, we
must recompute the hash index here */
tb_invalidated_flag = 0;
if ((loglevel & CPU_LOG_EXEC)) {
fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
(long)tb->tc_ptr, tb->pc,
lookup_symbol(tb->pc));
/* see if we can patch the calling TB. When the TB
spans two pages, we cannot safely do a direct jump. */
(env->kqemu_enabled != 2) &&
tb->page_addr[1] == -1) {
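/* Patch jump slot (next_tb & 3) of the previous TB so that it branches
   straight to this TB's generated code next time, bypassing the lookup
   above. */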
tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
spin_unlock(&tb_lock);
env->current_tb = tb;
/* cpu_interrupt might be called while translating the
TB, but before it is linked into a potentially
infinite loop and becomes env->current_tb. Avoid
starting execution if there is a pending interrupt. */
if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
env->current_tb = NULL;
while (env->current_tb) {
/* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
env = cpu_single_env;
#define env cpu_single_env
next_tb = tcg_qemu_tb_exec(tc_ptr);
env->current_tb = NULL;
if ((next_tb & 3) == 2) {
/* Instruction counter expired. */
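/* Rough picture of the icount bookkeeping used here: icount_decr.u16.low
   holds the 16-bit instruction budget consumed by translated code and
   icount_extra holds the instructions owed beyond that budget; when the
   budget runs out, execution stops early with the tag value 2 handled in
   this branch. */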
tb = (TranslationBlock *)(long)(next_tb & ~3);
CPU_PC_FROM_TB(env, tb);
insns_left = env->icount_decr.u32;
if (env->icount_extra && insns_left >= 0) {
/* Refill decrementer and continue execution. */
env->icount_extra += insns_left;
if (env->icount_extra > 0xffff) {
insns_left = env->icount_extra;
env->icount_extra -= insns_left;
env->icount_decr.u16.low = insns_left;
if (insns_left > 0) {
/* Execute remaining instructions. */
cpu_exec_nocache(insns_left, tb);
env->exception_index = EXCP_INTERRUPT;
/* reset soft MMU for next block (it can currently
only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
if (kqemu_is_ok(env) &&
(cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
#if defined(TARGET_I386)
/* restore flags in standard format */
env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
/* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
cpu_m68k_flush_flags(env, env->cc_op);
env->cc_op = CC_OP_FLAGS;
env->sr = (env->sr & 0xffe0)
| env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#error unsupported target CPU
/* restore global registers */
#include "hostregs_helper.h"
/* fail safe : never use cpu_single_env outside cpu_exec() */
cpu_single_env = NULL;
/* must only be called from the generated code as an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
/* XXX: cannot enable it yet because it yields to MMU exception
where NIP != read address on PowerPC */
target_ulong phys_addr;
phys_addr = get_phys_addr_code(env, start);
tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
CPUX86State *saved_env;
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
cpu_x86_load_seg_cache(env, seg_reg, selector,
(selector << 4), 0xffff, 0);
helper_load_seg(seg_reg, selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
CPUX86State *saved_env;
helper_fsave(ptr, data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
CPUX86State *saved_env;
helper_frstor(ptr, data32);
#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)
#if defined(TARGET_I386)
/* 'pc' is the host PC at which the exception was raised. 'address' is
the effective address of the memory exception. 'is_write' is 1 if a
write caused the exception and 0 otherwise. 'old_set' is the
signal set which should be restored */
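/* The handlers below return 0 when the fault is not a guest MMU fault (the
   host signal handler must deal with it), 1 when the guest MMU code handled
   it transparently, and do not return at all when a guest exception has to
   be raised. */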
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
env->eip, env->cr[2], env->error_code);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
raise_exception_err(env->exception_index, env->error_code);
/* activate soft MMU for this block */
env->hflags |= HF_SOFTMMU_MASK;
cpu_resume_from_signal(env, puc);
/* never comes here */
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
/* never comes here */
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
/* never comes here */
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
env->nip, env->error_code, tb);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
do_raise_exception_err(env->exception_index, env->error_code);
/* activate soft MMU for this block */
cpu_resume_from_signal(env, puc);
/* never comes here */
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(address, pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
/* never comes here */
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
tb = tb_find_pc(pc);
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
env->PC, env->error_code, tb);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
do_raise_exception_err(env->exception_index, env->error_code);
/* activate soft MMU for this block */
cpu_resume_from_signal(env, puc);
/* never comes here */
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
tb = tb_find_pc(pc);
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
env->nip, env->error_code, tb);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
/* never comes here */
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
tb = tb_find_pc(pc);
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
printf("PF exception: NIP=0x%08x error=0x%x %p\n",
env->nip, env->error_code, tb);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
/* never comes here */
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
int is_write, sigset_t *old_set,
TranslationBlock *tb;
env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
/* XXX: locking issue */
if (is_write && page_unprotect(h2g(address), pc, puc)) {
/* see if it is an MMU fault */
ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
return 0; /* not an MMU fault */
return 1; /* the MMU fault was handled without causing real CPU fault */
/* now we have a real cpu fault */
tb = tb_find_pc(pc);
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc, puc);
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
/* never comes here */
#error unsupported target CPU
#if defined(__i386__)
#if defined(__APPLE__)
# include <sys/ucontext.h>
# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
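/* For page faults (trapno 0xe) bit 1 of the error code indicates a write
   access; that is what the (ERROR_sig(uc) >> 1) & 1 expression below
   extracts. */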
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
#define REG_TRAPNO TRAPNO
trapno = TRAP_sig(uc);
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
(ERROR_sig(uc) >> 1) & 1 : 0,
&uc->uc_sigmask, puc);
#elif defined(__x86_64__)
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
pc = uc->uc_mcontext.gregs[REG_RIP];
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
(uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
&uc->uc_sigmask, puc);
#elif defined(__powerpc__)
/***********************************************************************
* signal context platform-specific definitions
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(link, context) /* Link register */
# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context) REG_sig(dar, context)
# define DSISR_sig(context) REG_sig(dsisr, context)
# define TRAP_sig(context) REG_sig(trap, context)
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context)
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(lr, context) /* Link register */
# define CR_sig(context) REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
if (DSISR_sig(uc) & 0x00800000)
if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask, puc);
#elif defined(__alpha__)
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
uint32_t *pc = uc->uc_mcontext.sc_pc;
uint32_t insn = *pc;
/* XXX: need kernel patch to get write flag faster */
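/* Without kernel help, the faulting instruction is decoded by hand: the
   opcode in its top bits tells whether it was a store, which is what sets
   is_write before handle_cpu_signal() is called. */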
switch (insn >> 26) {
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask, puc);
#elif defined(__sparc__)
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
uint32_t *regs = (uint32_t *)(info + 1);
void *sigmask = (regs + 20);
/* XXX: is there a standard glibc define ? */
unsigned long pc = regs[1];
struct sigcontext *sc = puc;
unsigned long pc = sc->sigc_regs.tpc;
void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
struct sigcontext *uc = puc;
unsigned long pc = uc->sc_pc;
void *sigmask = (void *)(long)uc->sc_mask;
/* XXX: need kernel patch to get write flag faster */
insn = *(uint32_t *)pc;
if ((insn >> 30) == 3) {
switch((insn >> 19) & 0x3f) {
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, sigmask, NULL);
#elif defined(__arm__)
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
pc = uc->uc_mcontext.gregs[R15];
pc = uc->uc_mcontext.arm_pc;
/* XXX: compute is_write */
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
&uc->uc_sigmask, puc);
#elif defined(__mc68000)
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
pc = uc->uc_mcontext.gregs[16];
/* XXX: compute is_write */
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
&uc->uc_sigmask, puc);
#elif defined(__ia64)
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
ip = uc->uc_mcontext.sc_ip;
switch (host_signum) {
if (info->si_code && (info->si_segvflags & __ISR_VALID))
/* ISR.W (write-access) is bit 33: */
is_write = (info->si_isr >> 33) & 1;
return handle_cpu_signal(ip, (unsigned long)info->si_addr,
&uc->uc_sigmask, puc);
#elif defined(__s390__)
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
pc = uc->uc_mcontext.psw.addr;
/* XXX: compute is_write */
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask, puc);
#elif defined(__mips__)
int cpu_signal_handler(int host_signum, void *pinfo,
siginfo_t *info = pinfo;
struct ucontext *uc = puc;
greg_t pc = uc->uc_mcontext.pc;
/* XXX: compute is_write */
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
is_write, &uc->uc_sigmask, puc);
#elif defined(__hppa__)
int cpu_signal_handler(int host_signum, void *pinfo,
struct siginfo *info = pinfo;
struct ucontext *uc = puc;
pc = uc->uc_mcontext.sc_iaoq[0];
/* FIXME: compute is_write */
return handle_cpu_signal(pc, (unsigned long)info->si_addr,
&uc->uc_sigmask, puc);
#error host CPU specific signal handler needed
#endif /* !defined(CONFIG_SOFTMMU) */