/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif
int tb_invalidated_flag;
static unsigned long next_tb;
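/* next_tb records the TB that was executing when the generated code last
   returned to the main loop, with the index of the outgoing jump slot
   encoded in its two low bits (see the tb_add_jump() call in cpu_exec());
   a value of 0 means "do not chain from the previous TB". */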
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
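// On sparc hosts the translator keeps 'env' and 'T0' in fixed host global
// registers, and %i7 holds the return address; the affected glibc versions
// clobber those registers inside setjmp()/longjmp(), so they are spilled
// to memory around each call and reloaded afterwards.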
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;

#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)
static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here, because
       longjmp() will restore them to their setjmp-time values */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
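    /* walk this bucket's collision chain; a TB matches only if its pc,
       first physical page, cs_base and flags all match, so translations
       made under a different CPU mode are never reused by mistake */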
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
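
/* The TB lookup is thus two-level: a direct-mapped, virtually indexed
   cache (env->tb_jmp_cache) checked inline above, backed by the
   physically indexed hash table walked in tb_find_slow(). */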
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    unsigned long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->interrupt_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
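            /* inner execution loop: service pending hardware interrupts
               first, then find the next TB and run it */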
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif

                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */

                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
                }
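                /* once patched, the generated code jumps TB-to-TB directly
                   and only returns here on an exception, an interrupt or a
                   jump that has not been chained yet */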
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__hppa__)
                asm volatile ("ble  0(%%sr4,%1)\n"
                              "copy %%r31,%%r18\n"
                              "copy %%r28,%0\n"
                              : "=r" (next_tb)
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r4", "r5", "r6", "r7",
                                "r8", "r9", "r10", "r11", "r12", "r13",
                                "r18", "r19", "r20", "r21", "r22", "r23",
                                "r24", "r25", "r26", "r27", "r28", "r29",
                                "r30", "r31");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no output */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#elif defined(__i386)
                asm volatile ("sub $12, %%esp\n\t"
                              "push %%ebp\n\t"
                              "call *%1\n\t"
                              "pop %%ebp\n\t"
                              "add $12, %%esp\n\t"
                              : "=a" (next_tb)
                              : "m" (gen_func)
                              : "ebx", "ecx", "edx", "esi", "edi", "cc",
                                "memory");
#elif defined(__x86_64__)
                asm volatile ("sub $8, %%rsp\n\t"
                              "push %%rbp\n\t"
                              "call *%1\n\t"
                              "pop %%rbp\n\t"
                              "add $8, %%rsp\n\t"
                              : "=a" (next_tb)
                              : "m" (gen_func)
                              : "rbx", "rcx", "rdx", "rsi", "rdi", "r8", "r9",
                                "r10", "r11", "r12", "r13", "r14", "r15", "cc",
                                "memory");
#else
                next_tb = gen_func();
#endif
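                /* next_tb now holds the (TB pointer | jump slot) descriptor
                   returned by the generated code; it is fed back to
                   tb_add_jump() on the next iteration */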
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot be enabled yet: on PowerPC it triggers an MMU
       exception where NIP != read address */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
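
/* These helpers temporarily install the given CPUX86State as the global
   'env', because the functions they call implicitly operate on 'env'. */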
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 && CONFIG_USER_ONLY */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
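/* Return value: 1 means the fault was handled here (the page was
   unprotected, or the corresponding guest exception was raised), 0 means
   it was not a guest MMU fault and must be handled by the caller. */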
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
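    /* trap 0xe is the i386 page fault; bit 1 of the error code is set
       when the faulting access was a write */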
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context) /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
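    /* DSISR reports the cause of a data storage interrupt; on classic
       PowerPC, bit 0x02000000 is set when the faulting access was a store
       (trap 0x400 is an instruction fetch fault, never a write) */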
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
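    /* meanwhile, decode the faulting instruction's major opcode
       (bits 31:26); the opcodes below are the Alpha store instructions */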
    switch (insn >> 26) {
    case 0x0d: /* stw */
    case 0x0e: /* stb */
    case 0x0f: /* stq_u */
    case 0x24: /* stf */
    case 0x25: /* stg */
    case 0x26: /* sts */
    case 0x27: /* stt */
    case 0x2c: /* stl */
    case 0x2d: /* stq */
    case 0x2e: /* stl_c */
    case 0x2f: /* stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
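    /* format 3 instructions (op field == 3) are the SPARC loads and
       stores; the op3 field (bits 24:19) selects the operation, and the
       values below are the integer stores */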
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x04: /* st */
        case 0x05: /* stb */
        case 0x06: /* sth */
        case 0x07: /* std */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */