/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
//#define DEBUG_SIGNAL

/* translation settings */
int translation_settings = 0;

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)
#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
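/* Note (added): on sparc hosts some emulator globals (env, T0) live in
   fixed host registers, and %i7 can be mangled across setjmp/longjmp by
   the buggy glibc versions matched above. The jump-buffer primitives are
   therefore wrapped so those registers are spilled to memory before, and
   reloaded after, every setjmp/longjmp. */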
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif
    env = env1;
    /* XXX: restore cpu registers saved in host registers */
#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
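/* Note (added): cpu_resume_from_signal() never returns. When called with
   a ucontext it first restores the signal mask that was in effect when
   translated code faulted, then longjmps back to the setjmp point in
   cpu_exec() so execution restarts in the main loop rather than inside
   the faulting translated block. */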
CPUTranslationSetting cpu_translation_settings[] = {
    { CPU_SETTING_NO_CACHE, "no-cache",
      "Do not use translation blocks cache (very slow!)" },
    { 0, NULL, NULL },
};

void cpu_set_translation_settings(int translation_flags)
{
    translation_settings = translation_flags;
}
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of translation settings. Return 0 if error. */
int cpu_str_to_translation_mask(const char *str)
{
    CPUTranslationSetting *setting;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                mask |= setting->mask;
            }
        } else {
            for(setting = cpu_translation_settings; setting->mask != 0; setting++) {
                if (cmp1(p, p1 - p, setting->name))
                    goto found;
            }
            return 0;
        found:
            mask |= setting->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
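/* Example (added, hypothetical caller): turning an option string into
   flags. "all" selects every entry of cpu_translation_settings[]; any
   unknown name invalidates the whole string and 0 is returned:

       int mask = cpu_str_to_translation_mask("no-cache");
       if (!mask)
           fprintf(stderr, "invalid translation setting\n");
       else
           cpu_set_translation_settings(mask);
*/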
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (translation_settings & CPU_SETTING_NO_CACHE)
        goto not_found;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
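/* Note (added): TB lookup is two-level. tb_find_fast() probes the per-CPU
   virtual-PC cache (env->tb_jmp_cache); this slow path searches the global
   hash keyed by the code's *physical* address (tb_phys_hash), so a block
   can still be found after the virtual cache was flushed or remapped, and
   a new translation is generated only when both lookups miss. */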
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS];
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    if (translation_settings & CPU_SETTING_NO_CACHE)
        tb = NULL;
    else
        tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}
#define BREAK_CHAIN T0 = 0
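/* Note (added): T0 doubles as the block-chaining cookie. After a
   translated block exits, its low 2 bits select which jump slot of the
   previous TB to patch and the remaining bits hold that TB's address.
   BREAK_CHAIN zeroes the cookie so the next block executed will not be
   direct-jump linked to the previous one, which is required whenever
   control flow was changed behind the translator's back (interrupts,
   exceptions, TB invalidation). */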
/* main execution loop */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
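                    /* Note (added): the VIRQ path above implements SVM
                       virtual interrupt injection: the vector is read from
                       the guest's VMCB (control.int_vector) instead of the
                       PIC, and the pending V_IRQ bit is cleared in
                       control.int_ctl once the interrupt has been taken. */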
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        /* XXXXX */
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
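                /* Note (added): chaining happens here. T0 still holds the
                   cookie left by the previously executed block (its own
                   address plus a 2-bit jump-slot index), so tb_add_jump()
                   patches that block's exit to branch directly into the
                   new tb. Blocks spanning two pages are never linked
                   directly because the second page's mapping may change. */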
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                gen_func();
#endif
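                /* Note (added): the ia64 branch above exists because on
                   ia64 a function pointer really points at a descriptor
                   holding the entry address and the callee's gp (global
                   pointer); a fake descriptor is built with gp aimed into
                   the code generation buffer before calling the
                   translated code. */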
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* inner for(;;) */
        } else {
            env_to_regs();
        }
    } /* outer for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
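/* Note (added): cpu_exec() returns the exception index that broke the
   loop, either a loop-exit value >= EXCP_INTERRUPT (EXCP_HLT, EXCP_DEBUG,
   EXCP_INTERRUPT itself) or, under user_mode_only, the simulated target
   exception number; the caller's main loop dispatches on that value. */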
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_fsave(ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    helper_frstor(ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
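/* Note (added): return protocol shared by every handle_cpu_signal variant
   below: 1 means the fault was fully handled (write-protected page
   unprotected, or MMU fault resolved) and the host signal handler can
   simply return to re-execute the faulting instruction; 0 means this was
   not a guest MMU fault and should be treated as a real host crash. A
   genuine guest CPU fault never returns here: it longjmps back into
   cpu_exec() via the exception-raising helpers. */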
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    raise_exception(env->exception_index);

    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
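/* Note (added): on x86 hosts the SIGSEGV is classified by reading the
   page-fault error code out of the signal context: trap 0xe is a page
   fault, and bit 1 of the error code is set for write accesses, which is
   exactly the is_write flag handle_cpu_signal() needs. */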
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context)  /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: case 0x0e: case 0x0f:            /* stw, stb, stq_u */
    case 0x24: case 0x25: case 0x26: case 0x27: /* stf, stg, sts, stt */
    case 0x2c: case 0x2d: case 0x2e: case 0x2f: /* stl, stq, stl_c, stq_c */
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
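/* Note (added): with no fault-status information available from the
   kernel, the alpha handler decodes the faulting instruction itself: the
   major opcodes matched above are the store family, so hitting one of
   them is what marks the access as a write. */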
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: case 0x06: case 0x04: case 0x07: /* stb, sth, st, std */
        case 0x24: case 0x27: case 0x25:            /* stf, stdf, stfsr */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL: case SIGFPE: case SIGSEGV: case SIGBUS: case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */