/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;

#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
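
/* A minimal, self-contained sketch (hypothetical demo_* names, not
   QEMU API) of the setjmp/longjmp exception pattern cpu_loop_exit
   relies on: cpu_exec() establishes the jump buffer, and any helper
   that raises an exception unwinds straight back to it. */
#if 0
#include <setjmp.h>

static jmp_buf demo_jmp_env;

static void demo_raise_exception(void)
{
    longjmp(demo_jmp_env, 1);          /* unwind to the main loop */
}

static void demo_exec_loop(void)
{
    if (setjmp(demo_jmp_env) == 0) {
        demo_raise_exception();        /* "translated code" faults here */
    }
    /* control resumes here with the exception pending, exactly like
       the setjmp(env->jmp_env) site in cpu_exec() below */
}
#endif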
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
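
/* Miniature sketch (hypothetical demo_* names) of the lookup that
   tb_find_slow performs above: hash the physical PC, then walk the
   collision chain comparing pc/cs_base/flags until a matching
   translated block is found. */
#if 0
#define DEMO_HASH_SIZE 4096

struct demo_tb {
    unsigned long pc, cs_base, flags;
    struct demo_tb *phys_hash_next;
};
static struct demo_tb *demo_phys_hash[DEMO_HASH_SIZE];

static struct demo_tb *demo_find(unsigned long pc, unsigned long cs_base,
                                 unsigned long flags)
{
    struct demo_tb *tb = demo_phys_hash[pc % DEMO_HASH_SIZE];
    for (; tb != NULL; tb = tb->phys_hash_next) {
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
            return tb;      /* hit: reuse the translated code */
    }
    return NULL;            /* miss: the caller must translate */
}
#endif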
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is run. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

#define BREAK_CHAIN T0 = 0
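
/* Companion sketch to the one after tb_find_slow (same hypothetical
   demo_* names): the fast path in tb_find_fast is a direct-mapped
   cache indexed by a hash of the virtual PC, refilled from the slow
   physical-hash walk on a miss or on a pc/cs_base/flags mismatch. */
#if 0
#define DEMO_JMP_CACHE_BITS 12
static struct demo_tb *demo_jmp_cache[1 << DEMO_JMP_CACHE_BITS];

static struct demo_tb *demo_find_fast(unsigned long pc,
                                      unsigned long cs_base,
                                      unsigned long flags)
{
    unsigned int h = (pc >> 2) & ((1 << DEMO_JMP_CACHE_BITS) - 1);
    struct demo_tb *tb = demo_jmp_cache[h];

    if (!tb || tb->pc != pc || tb->cs_base != cs_base ||
        tb->flags != flags) {
        tb = demo_find(pc, cs_base, flags);  /* slow path, sketched above */
        demo_jmp_cache[h] = tb;
    }
    return tb;
}
#endif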
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
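
    /* Worked example for the i386 eflags conversion above: bit 10 of
       eflags is the direction flag. Keeping DF as +1/-1 lets string
       instructions simply add DF to their index registers:
           eflags.DF = 0  ->  DF = 1 - 2*0 = +1  (auto-increment)
           eflags.DF = 1  ->  DF = 1 - 2*1 = -1  (auto-decrement)
       The arithmetic flags are likewise kept lazily in CC_SRC/CC_OP
       and only folded back into eflags when a consumer needs them. */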
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
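
                /* Sketch of the pointer tagging used just above: T0
                   carries the address of the TB we are chaining from,
                   with the index of the jump slot taken stored in the
                   two low bits of the (aligned) pointer. Hypothetical
                   decode helper, not QEMU API: */
#if 0
static void demo_decode_chain(unsigned long t0,
                              TranslationBlock **last_tb, int *slot)
{
    *last_tb = (TranslationBlock *)(t0 & ~3ul); /* strip the tag bits */
    *slot = (int)(t0 & 3);                      /* which jump slot */
}
#endif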
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble 0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (T0)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it can yield an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}
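
/* Worked example for the real-mode path above: outside protected mode
   (or in VM86 mode) a segment base is simply selector << 4 with limit
   0xffff, so loading a segment register with 0x1234 yields base
   0x12340, and 1234:0010 addresses linear 0x12350. */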
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
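
/* handle_cpu_signal is reached from a host SIGSEGV handler installed
   by qemu's user-mode signal code (not shown in this file). A minimal
   sketch of such a registration, using plain POSIX sigaction and a
   hypothetical demo_* wrapper: */
#if 0
#include <signal.h>
#include <stdlib.h>

static void demo_segv_handler(int sig, siginfo_t *info, void *puc)
{
    /* cpu_signal_handler() (defined later in this file) extracts the
       host pc and the write flag from puc and calls handle_cpu_signal;
       if the fault was not one of ours, fall back to a hard abort */
    if (!cpu_signal_handler(sig, info, puc))
        abort();
}

static void demo_install(void)
{
    struct sigaction act;

    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;        /* deliver siginfo_t and ucontext */
    act.sa_sigaction = demo_segv_handler;
    sigaction(SIGSEGV, &act, NULL);
}
#endif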
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC[env->current_tc], env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)    (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)   ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)  ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)    ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)   ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)  ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
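
/* Note on the is_write derivation above: on i386 the page-fault error
   code (ERROR_sig) has bit 0 = protection violation vs. not-present,
   bit 1 = write access, bit 2 = user mode; hence "(ERROR_sig(uc) >> 1)
   & 1", and only when the trap number is 0xe (page fault). */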
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)    /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of the PowerPC exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */