/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
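/* Set when translation may have invalidated or flushed existing TBs;
   the execution loop then forces next_tb back to zero so that it never
   chains a jump into a TB that has been freed. */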
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}
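/* Convention assumed by the checks below: tcg_qemu_tb_exec() returns the
   address of the TB that just executed, with the index of the jump slot
   taken OR'ed into the two low bits; a low-bit value of 2 instead signals
   that the instruction counter expired.  next_tb == 0 means no chaining
   should be attempted. */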
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
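/* TB lookup is two-level: tb_find_fast() probes the per-CPU virtual-pc
   cache (env->tb_jmp_cache); on a miss, tb_find_slow() walks the hash
   table keyed on the *physical* code address, so different virtual
   mappings of the same physical page share one translation, and
   translates the block if none exists yet. */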
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
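/* Note that on i386 the lookup key includes cs_base as well as the linear
   address cs_base + eip, so the same eip reached through a different code
   segment cannot alias another segment's translation. */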
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            if (kvm_enabled()) {
                ret = kvm_cpu_exec(env);
                if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                } else if (env->halted) {
                    cpu_loop_exit();
                } else
                    longjmp(env->jmp_env, 1);
            }
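            /* From here on, the two nested for(;;) loops do the real work:
               the outer one re-arms the setjmp context after every
               exception, while the inner one services pending interrupts,
               finds (or translates) the next TB and runs it until
               something forces a longjmp back out. */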
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
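                /* Every interrupt path above clears next_tb: once the
                   program flow has been redirected, the previously executed
                   TB must not be patched to jump straight to the next one,
                   or chained code would bypass the interrupt's effects. */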
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
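                /* icount bookkeeping, as used above: the 16-bit decrementer
                   (icount_decr.u16.low) counts down inside generated code,
                   while icount_extra holds the instructions that did not fit
                   in 16 bits.  On expiry the two are rebalanced; once both
                   are exhausted the loop raises EXCP_INTERRUPT so that
                   pending timers can run. */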
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) forever */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
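/* These user-mode wrappers all follow the same pattern: the helpers
   operate on the global env pointer, so the caller's CPU state is
   swapped in around the call and restored afterwards. */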
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
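/* Return convention used by every variant below: 1 means the fault was
   handled (either by unprotecting the page or by delivering a guest
   exception), 0 means it was not an emulated-MMU fault and the host
   signal must be processed normally. */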
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
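/* Host-side handlers: each cpu_signal_handler() variant below digs the
   faulting PC (and, where the host exposes it, a write flag) out of the
   host's signal frame and forwards them to handle_cpu_signal(). */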
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)          REG_sig(r##reg_num, context)
# define IAR_sig(context)                   REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                   REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                   REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                   REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                    REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                    REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)        FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                 ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                   EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)                 EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                  EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */