/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#include <signal.h>
#include <sys/ucontext.h>
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
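
/* Set when translated code may have been invalidated while a new TB
   was being generated; the main loop then forces next_tb back to zero
   so that no stale chain pointer is ever patched into freed code. */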
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
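
    /* The low two bits of next_tb form a tag; tag value 2 means the TB
       exited before its body ran (see the main loop for the others). */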
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
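
/* TBs are keyed by physical PC (plus cs_base and flags), so a block is
   shared across different virtual mappings of the same code; the
   tb_jmp_cache table is only a virtual-PC front end to this hash. */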
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
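
/* Fast path: a direct-mapped, virtual-PC-indexed cache in front of the
   physical hash lookup above; any mismatch in pc, cs_base or flags
   falls back to tb_find_slow(). */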
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
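            /* everything up to cpu_loop_exit() runs under this setjmp
               context; a longjmp back here restarts the iteration */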
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
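                    /* CPU_INTERRUPT_EXIT, by contrast, leaves the
                       execution loop entirely via EXCP_INTERRUPT. */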
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
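                /* The low two bits of next_tb name the jump slot through
                   which the previous TB exited; tb_add_jump() patches
                   that slot so both TBs chain without leaving the loop. */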
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
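                    /* tag 2 in the low bits of next_tb: the TB exited
                       early because the instruction count budget was
                       exhausted; restore the PC and refill or wind down
                       the icount decrementer. */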
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env = env;
    env = s;
    helper_fsave(ptr, data32);
    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env = env;
    env = s;
    helper_frstor(ptr, data32);
    env = saved_env;
}

#endif /* TARGET_I386 && CONFIG_USER_ONLY */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
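/* Returns 1 if the fault was handled (either by unprotecting a page
   holding translated code, or by raising a guest exception) and 0 if
   the signal must be passed on to the host. */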
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
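
/* The per-target handlers below all follow the same pattern: first try
   page_unprotect() for write faults on pages containing translated
   code, then query the target MMU, and only then deliver a guest
   exception; they differ mainly in the MMU entry point used. */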
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x%08x error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
           env->pc, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
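
/* Host-side handlers: each one digs the faulting PC (and, where the
   hardware reports one, a write flag) out of the host signal context
   and hands off to handle_cpu_signal() above. */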
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
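
/* Trap 0xe is the x86 page fault; bit 1 of the error code is set for
   write accesses, which is what (ERROR_sig(uc) >> 1) & 1 extracts. */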
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num])
# define FPSCR_sig(context)           (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: case 0x0e: case 0x0f:            /* stw, stb, stq_u */
    case 0x24: case 0x25: case 0x26: case 0x27: /* stf, stg, sts, stt */
    case 0x2c: case 0x2d: case 0x2e: case 0x2f: /* stl, stq, stl_c, stq_c */
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x04: case 0x05: case 0x06: case 0x07: /* st, stb, sth, std */
        case 0x14: case 0x15: case 0x16: case 0x17: /* alternate-space forms */
        case 0x0e: case 0x1e: case 0x24: case 0x27: /* stx, stxa, stf, stdf */
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif
#endif /* !defined(CONFIG_SOFTMMU) */