4 * Copyright (c) 2003-2008 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include <sys/syscall.h>
22 #include <sys/resource.h>
25 #include "qemu/path.h"
26 #include "qemu/cutils.h"
27 #include "qemu/help_option.h"
29 #include "exec/exec-all.h"
31 #include "qemu/timer.h"
32 #include "qemu/envlist.h"
/* File-scope state configured from the command line and environment.
 * NOTE(review): this listing embeds original line numbers and has interior
 * lines elided; comments only, code left byte-identical. */
39 static const char *filename;
40 static const char *argv0;
41 static int gdbstub_port;
42 static envlist_t *envlist;
43 static const char *cpu_model;
/* Non-static: presumably shared with the mmap/loader code — TODO confirm. */
44 unsigned long mmap_min_addr;
45 unsigned long guest_base;
/* EXCP_DUMP(env, fmt, ...): report an unhandled CPU exception.  Prints the
 * message and the full register state to stderr, and mirrors both into the
 * qemu log when logging goes to a separate file (qemu_log_separate()).
 * NOTE(review): the macro's do/while wrapper and closing lines appear to be
 * elided from this listing. */
48 #define EXCP_DUMP(env, fmt, ...) \
50 CPUState *cs = ENV_GET_CPU(env); \
51 fprintf(stderr, fmt , ## __VA_ARGS__); \
52 cpu_dump_state(cs, stderr, fprintf, 0); \
53 if (qemu_log_separate()) { \
54 qemu_log(fmt, ## __VA_ARGS__); \
55 log_cpu_state(cs, 0); \
/* reserved_va: size of a contiguous chunk of host virtual memory reserved up
 * front for the guest when emulating a 32-bit target on a 64-bit host, so the
 * guest address space never collides with QEMU's own mappings.
 * NOTE(review): the three definitions below are alternatives of an elided
 * #if/#elif/#else chain — MIPS (31-bit user VA), other 32-bit targets, and
 * the default (zero-initialized) case when not running 32-on-64. */
59 #if (TARGET_LONG_BITS == 32) && (HOST_LONG_BITS == 64)
61 * When running 32-on-64 we should make sure we can fit all of the possible
62 * guest address space into a contiguous chunk of virtual host memory.
64 * This way we will never overlap with our own libraries or binaries or stack
65 * or anything else that QEMU maps.
68 /* MIPS only supports 31 bits of virtual address space for user space */
69 unsigned long reserved_va = 0x77000000;
71 unsigned long reserved_va = 0xf7000000;
74 unsigned long reserved_va;
/* Forward declaration and more configuration state. */
77 static void usage(int exitcode);
/* Prefix under which the guest's ELF interpreter (dynamic linker) is found. */
79 static const char *interp_prefix = CONFIG_QEMU_INTERP_PREFIX;
/* Kernel release string reported to the guest by uname(2) emulation. */
80 const char *qemu_uname_release;
82 /* XXX: on x86 MAP_GROWSDOWN only works if ESP <= address + 32, so
83 we allocate a bigger stack. Need a better solution, for example
84 by remapping the process stack directly at the right place */
85 unsigned long guest_stack_size = 8 * 1024 * 1024UL;
/* gemu_log: printf-style logging to stderr (used for strace-style output and
 * warnings).  NOTE(review): the va_start/va_end lines are elided here. */
87 void gemu_log(const char *fmt, ...)
92 vfprintf(stderr, fmt, ap);
/* cpu_get_pic_interrupt: i386-only stub — there is no PIC in user-mode
 * emulation; the visible body is elided in this listing. */
96 #if defined(TARGET_I386)
97 int cpu_get_pic_interrupt(CPUX86State *env)
103 /***********************************************************/
104 /* Helper routines for implementing atomic operations. */
106 /* To implement exclusive operations we force all cpus to syncronise.
107 We don't require a full sync, only that no cpus are executing guest code.
108 The alternative is to map target atomic ops onto host equivalents,
109 which requires quite a lot of per host/target work. */
/* Shared synchronization state for start_exclusive()/end_exclusive() and the
 * cpu_exec_start()/cpu_exec_end() pairs below. */
110 static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
111 static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
112 static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
113 static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
/* Number of CPUs an in-progress exclusive operation is still waiting on;
 * nonzero while an exclusive section is active.  Protected by exclusive_lock. */
114 static int pending_cpus;
116 /* Make sure everything is in a consistent state for calling fork(). */
/* Takes the TB lock and the exclusive lock so the child inherits both in a
 * known (held) state; fork_end() releases or re-initializes them.
 * NOTE(review): an mmap_fork_start() call is presumably elided here, since
 * fork_end() below calls mmap_fork_end() — TODO confirm against upstream. */
117 void fork_start(void)
119 qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
120 pthread_mutex_lock(&exclusive_lock);
124 void fork_end(int child)
126 mmap_fork_end(child);
128 CPUState *cpu, *next_cpu;
129 /* Child processes created by fork() only have a single thread.
130 Discard information about the parent threads. */
131 CPU_FOREACH_SAFE(cpu, next_cpu) {
132 if (cpu != thread_cpu) {
133 QTAILQ_REMOVE(&cpus, thread_cpu, node);
137 pthread_mutex_init(&exclusive_lock, NULL);
138 pthread_mutex_init(&cpu_list_mutex, NULL);
139 pthread_cond_init(&exclusive_cond, NULL);
140 pthread_cond_init(&exclusive_resume, NULL);
141 qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
142 gdbserver_fork(thread_cpu);
144 pthread_mutex_unlock(&exclusive_lock);
145 qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
149 /* Wait for pending exclusive operations to complete. The exclusive lock
/* must be held by the caller; pending_cpus and exclusive_resume are both
 * protected by exclusive_lock. */
151 static inline void exclusive_idle(void)
153 while (pending_cpus) {
154 pthread_cond_wait(&exclusive_resume, &exclusive_lock);
158 /* Start an exclusive operation.
159 Must only be called from outside cpu_arm_exec. */
/* Takes exclusive_lock (held until end_exclusive()), interrupts every other
 * running CPU, and waits on exclusive_cond until all of them have left guest
 * code (pending_cpus drops to 1, i.e. only this thread remains counted).
 * NOTE(review): the lines that set pending_cpus and kick the other CPUs are
 * elided from this listing. */
160 static inline void start_exclusive(void)
164 pthread_mutex_lock(&exclusive_lock);
168 /* Make all other cpus stop executing. */
169 CPU_FOREACH(other_cpu) {
170 if (other_cpu->running) {
175 if (pending_cpus > 1) {
176 pthread_cond_wait(&exclusive_cond, &exclusive_lock);
180 /* Finish an exclusive operation. */
/* Clears the pending state (elided here), wakes every CPU blocked in
 * exclusive_idle(), and releases the lock taken by start_exclusive(). */
181 static inline void __attribute__((unused)) end_exclusive(void)
184 pthread_cond_broadcast(&exclusive_resume);
185 pthread_mutex_unlock(&exclusive_lock);
188 /* Wait for exclusive ops to finish, and begin cpu execution. */
/* Called before entering the guest: under exclusive_lock, waits out any
 * pending exclusive operation and marks this cpu as running (the
 * exclusive_idle()/cpu->running lines are elided in this listing). */
189 static inline void cpu_exec_start(CPUState *cpu)
191 pthread_mutex_lock(&exclusive_lock);
194 pthread_mutex_unlock(&exclusive_lock);
197 /* Mark cpu as not executing, and release pending exclusive ops. */
/* Counterpart of cpu_exec_start(): clears cpu->running and, if an exclusive
 * operation is waiting on this CPU, decrements pending_cpus (elided line)
 * and signals the exclusive thread once it is the last one left. */
198 static inline void cpu_exec_end(CPUState *cpu)
200 pthread_mutex_lock(&exclusive_lock);
201 cpu->running = false;
202 if (pending_cpus > 1) {
204 if (pending_cpus == 1) {
205 pthread_cond_signal(&exclusive_cond);
209 pthread_mutex_unlock(&exclusive_lock);
/* Trivial wrappers serializing access to the global CPU list. */
212 void cpu_list_lock(void)
214 pthread_mutex_lock(&cpu_list_mutex);
217 void cpu_list_unlock(void)
219 pthread_mutex_unlock(&cpu_list_mutex);
224 /***********************************************************/
225 /* CPUX86 core interface */
/* Guest RDTSC is backed directly by the host ticks counter. */
227 uint64_t cpu_get_tsc(CPUX86State *env)
229 return cpu_get_host_ticks();
/* write_dt: encode a descriptor-table entry (base/limit plus flags, flags
 * parameter elided in this listing) into the two 32-bit words at ptr,
 * following the x86 segment-descriptor bit layout. */
232 static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
237 e1 = (addr << 16) | (limit & 0xffff);
238 e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
/* Emulated IDT, allocated elsewhere (allocation not visible here). */
245 static uint64_t *idt_table;
/* set_gate64: build a 16-byte long-mode IDT gate (selector, type, dpl,
 * 64-bit offset split across three words; word 2 holds offset bits 32..63). */
247 static void set_gate64(void *ptr, unsigned int type, unsigned int dpl,
248 uint64_t addr, unsigned int sel)
251 e1 = (addr & 0xffff) | (sel << 16);
252 e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
256 p[2] = tswap32(addr >> 32);
/* 64-bit variant: each IDT entry is two uint64_t words, hence the n * 2. */
259 /* only dpl matters as we do only user space emulation */
260 static void set_idt(int n, unsigned int dpl)
262 set_gate64(idt_table + n * 2, 0, dpl, 0, 0);
/* set_gate: 32-bit (8-byte) gate encoding, same field layout as set_gate64
 * minus the high offset word.  NOTE(review): the surrounding #if/#else that
 * selects between the 64-bit and 32-bit variants is elided here. */
265 static void set_gate(void *ptr, unsigned int type, unsigned int dpl,
266 uint32_t addr, unsigned int sel)
269 e1 = (addr & 0xffff) | (sel << 16);
270 e2 = (addr & 0xffff0000) | 0x8000 | (dpl << 13) | (type << 8);
276 /* only dpl matters as we do only user space emulation */
277 static void set_idt(int n, unsigned int dpl)
279 set_gate(idt_table + n, 0, dpl, 0, 0);
/* x86 main emulation loop: run guest code via cpu_x86_exec() and service the
 * resulting traps — syscalls (int $0x80 / syscall), faults mapped to guest
 * signals via queue_signal(), vm86 handling, and gdbstub debug stops.
 * NOTE(review): many lines (switch/case labels, do_syscall argument lists,
 * break statements, closing braces) are elided from this listing. */
283 void cpu_loop(CPUX86State *env)
285 CPUState *cs = CPU(x86_env_get_cpu(env));
289 target_siginfo_t info;
293 trapnr = cpu_x86_exec(cs);
297 /* linux syscall from int $0x80 */
298 ret = do_syscall(env,
/* -TARGET_ERESTARTSYS: leave EAX untouched so the syscall restarts;
 * -TARGET_QEMU_ESIGRETURN: sigreturn already wrote the register state. */
307 if (ret == -TARGET_ERESTARTSYS) {
309 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
310 env->regs[R_EAX] = ret;
315 /* linux syscall from syscall instruction */
316 ret = do_syscall(env,
325 if (ret == -TARGET_ERESTARTSYS) {
327 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
328 env->regs[R_EAX] = ret;
/* Fault-to-signal translations below: fill a target_siginfo_t and queue it
 * for delivery by process_pending_signals(). */
334 info.si_signo = TARGET_SIGBUS;
336 info.si_code = TARGET_SI_KERNEL;
337 info._sifields._sigfault._addr = 0;
338 queue_signal(env, info.si_signo, &info);
341 /* XXX: potential problem if ABI32 */
342 #ifndef TARGET_X86_64
343 if (env->eflags & VM_MASK) {
344 handle_vm86_fault(env);
348 info.si_signo = TARGET_SIGSEGV;
350 info.si_code = TARGET_SI_KERNEL;
351 info._sifields._sigfault._addr = 0;
352 queue_signal(env, info.si_signo, &info);
/* Page fault: error_code bit 0 distinguishes not-present (MAPERR) from
 * protection (ACCERR); CR2 holds the faulting address. */
356 info.si_signo = TARGET_SIGSEGV;
358 if (!(env->error_code & 1))
359 info.si_code = TARGET_SEGV_MAPERR;
361 info.si_code = TARGET_SEGV_ACCERR;
362 info._sifields._sigfault._addr = env->cr[2];
363 queue_signal(env, info.si_signo, &info);
366 #ifndef TARGET_X86_64
367 if (env->eflags & VM_MASK) {
368 handle_vm86_trap(env, trapnr);
372 /* division by zero */
373 info.si_signo = TARGET_SIGFPE;
375 info.si_code = TARGET_FPE_INTDIV;
376 info._sifields._sigfault._addr = env->eip;
377 queue_signal(env, info.si_signo, &info);
382 #ifndef TARGET_X86_64
383 if (env->eflags & VM_MASK) {
384 handle_vm86_trap(env, trapnr);
/* Debug (#DB) vs breakpoint-style traps: only #DB reports the faulting EIP. */
388 info.si_signo = TARGET_SIGTRAP;
390 if (trapnr == EXCP01_DB) {
391 info.si_code = TARGET_TRAP_BRKPT;
392 info._sifields._sigfault._addr = env->eip;
394 info.si_code = TARGET_SI_KERNEL;
395 info._sifields._sigfault._addr = 0;
397 queue_signal(env, info.si_signo, &info);
402 #ifndef TARGET_X86_64
403 if (env->eflags & VM_MASK) {
404 handle_vm86_trap(env, trapnr);
408 info.si_signo = TARGET_SIGSEGV;
410 info.si_code = TARGET_SI_KERNEL;
411 info._sifields._sigfault._addr = 0;
412 queue_signal(env, info.si_signo, &info);
416 info.si_signo = TARGET_SIGILL;
418 info.si_code = TARGET_ILL_ILLOPN;
419 info._sifields._sigfault._addr = env->eip;
420 queue_signal(env, info.si_signo, &info);
423 /* just indicate that signals should be handled asap */
/* gdbstub stop: hand TARGET_SIGTRAP to the debugger first. */
429 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
434 info.si_code = TARGET_TRAP_BRKPT;
435 queue_signal(env, info.si_signo, &info);
440 pc = env->segs[R_CS].base + env->eip;
441 EXCP_DUMP(env, "qemu: 0x%08lx: unhandled CPU exception 0x%x - aborting\n",
445 process_pending_signals(env);
/* ARM guest-memory access helpers.  The *_code_* variants byte-swap when the
 * instruction stream endianness differs (bswap_code/arm_sctlr_b); the
 * *_data_* variants swap for data accesses (arm_cpu_bswap_data).  Each
 * statement-expression yields the abi_long status of the underlying
 * get_user/put_user.  NOTE(review): the bswap lines and closing `})` of each
 * macro are elided from this listing. */
452 #define get_user_code_u32(x, gaddr, env) \
453 ({ abi_long __r = get_user_u32((x), (gaddr)); \
454 if (!__r && bswap_code(arm_sctlr_b(env))) { \
460 #define get_user_code_u16(x, gaddr, env) \
461 ({ abi_long __r = get_user_u16((x), (gaddr)); \
462 if (!__r && bswap_code(arm_sctlr_b(env))) { \
468 #define get_user_data_u32(x, gaddr, env) \
469 ({ abi_long __r = get_user_u32((x), (gaddr)); \
470 if (!__r && arm_cpu_bswap_data(env)) { \
476 #define get_user_data_u16(x, gaddr, env) \
477 ({ abi_long __r = get_user_u16((x), (gaddr)); \
478 if (!__r && arm_cpu_bswap_data(env)) { \
484 #define put_user_data_u32(x, gaddr, env) \
485 ({ typeof(x) __x = (x); \
486 if (arm_cpu_bswap_data(env)) { \
487 __x = bswap32(__x); \
489 put_user_u32(__x, (gaddr)); \
492 #define put_user_data_u16(x, gaddr, env) \
493 ({ typeof(x) __x = (x); \
494 if (arm_cpu_bswap_data(env)) { \
495 __x = bswap16(__x); \
497 put_user_u16(__x, (gaddr)); \
501 /* Commpage handling -- there is no commpage for AArch64 */
504 * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
506 * r0 = pointer to oldval
507 * r1 = pointer to newval
508 * r2 = pointer to target value
511 * r0 = 0 if *ptr was changed, non-0 if no exchange happened
512 * C set if *ptr was changed, clear if no exchange happened
514 * Note segv's in kernel helpers are a bit tricky, we can set the
515 * data address sensibly but the PC address is just the entry point.
/* Emulates the __kernel_cmpxchg64 commpage helper: read oldval/newval/target
 * through the guest pointers in r0-r2, compare-and-maybe-store, then update
 * CPSR.C per the contract above.  Any failing guest access records the fault
 * address in env->exception.vaddress and queues a SIGSEGV (bottom of the
 * function).  NOTE(review): the start_exclusive/end_exclusive bracketing and
 * several intermediate lines are elided from this listing. */
517 static void arm_kernel_cmpxchg64_helper(CPUARMState *env)
519 uint64_t oldval, newval, val;
521 target_siginfo_t info;
523 /* Based on the 32 bit code in do_kernel_trap */
525 /* XXX: This only works between threads, not between processes.
526 It's probably possible to implement this with native host
527 operations. However things like ldrex/strex are much harder so
528 there's not much point trying. */
530 cpsr = cpsr_read(env);
533 if (get_user_u64(oldval, env->regs[0])) {
534 env->exception.vaddress = env->regs[0];
538 if (get_user_u64(newval, env->regs[1])) {
539 env->exception.vaddress = env->regs[1];
543 if (get_user_u64(val, addr)) {
544 env->exception.vaddress = addr;
551 if (put_user_u64(val, addr)) {
552 env->exception.vaddress = addr;
/* Only the C flag is written back; other CPSR bits are preserved. */
562 cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
568 /* We get the PC of the entry address - which is as good as anything,
569 on a real kernel what you get depends on which mode it uses. */
570 info.si_signo = TARGET_SIGSEGV;
572 /* XXX: check env->error_code */
573 info.si_code = TARGET_SEGV_MAPERR;
574 info._sifields._sigfault._addr = env->exception.vaddress;
575 queue_signal(env, info.si_signo, &info);
578 /* Handle a jump to the kernel code page. */
/* Dispatches on the commpage entry address in r15: memory barrier, 32-bit
 * cmpxchg, get_tls, and 64-bit cmpxchg (delegated to the helper above), then
 * returns to the caller via lr.  Return value (elided) presumably signals
 * unrecognized addresses to the caller — TODO confirm. */
580 do_kernel_trap(CPUARMState *env)
586 switch (env->regs[15]) {
587 case 0xffff0fa0: /* __kernel_memory_barrier */
588 /* ??? No-op. Will need to do better for SMP. */
590 case 0xffff0fc0: /* __kernel_cmpxchg */
591 /* XXX: This only works between threads, not between processes.
592 It's probably possible to implement this with native host
593 operations. However things like ldrex/strex are much harder so
594 there's not much point trying. */
596 cpsr = cpsr_read(env);
598 /* FIXME: This should SEGV if the access fails. */
599 if (get_user_u32(val, addr))
601 if (val == env->regs[0]) {
603 /* FIXME: Check for segfaults. */
604 put_user_u32(val, addr);
611 cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
614 case 0xffff0fe0: /* __kernel_get_tls */
615 env->regs[0] = cpu_get_tls(env);
617 case 0xffff0f60: /* __kernel_cmpxchg64 */
618 arm_kernel_cmpxchg64_helper(env);
624 /* Jump back to the caller. */
625 addr = env->regs[14];
630 env->regs[15] = addr;
635 /* Store exclusive handling for AArch32 */
/* Completes a guest STREX/STREXD: re-reads the monitored location, compares
 * with the value recorded at LDREX time (exclusive_val), and stores the new
 * value only on a match.  exclusive_info packs size and the Rd/Rt register
 * numbers in 4-bit fields.  Returns the segv status; the result register
 * gets 0 on success / 1 on failure (rc, assignment lines partly elided). */
636 static int do_strex(CPUARMState *env)
644 if (env->exclusive_addr != env->exclusive_test) {
647 /* We know we're always AArch32 so the address is in uint32_t range
648 * unless it was the -1 exclusive-monitor-lost value (which won't
649 * match exclusive_test above).
651 assert(extract64(env->exclusive_addr, 32, 32) == 0);
652 addr = env->exclusive_addr;
653 size = env->exclusive_info & 0xf;
656 segv = get_user_u8(val, addr);
659 segv = get_user_data_u16(val, addr, env);
663 segv = get_user_data_u32(val, addr, env);
669 env->exception.vaddress = addr;
/* 64-bit (STREXD) case: second word read, with big-endian data ordering
 * handled by the deposit64 pair below. */
674 segv = get_user_data_u32(valhi, addr + 4, env);
676 env->exception.vaddress = addr + 4;
679 if (arm_cpu_bswap_data(env)) {
680 val = deposit64((uint64_t)valhi, 32, 32, val);
682 val = deposit64(val, 32, 32, valhi);
685 if (val != env->exclusive_val) {
689 val = env->regs[(env->exclusive_info >> 8) & 0xf];
692 segv = put_user_u8(val, addr);
695 segv = put_user_data_u16(val, addr, env);
699 segv = put_user_data_u32(val, addr, env);
703 env->exception.vaddress = addr;
707 val = env->regs[(env->exclusive_info >> 12) & 0xf];
708 segv = put_user_data_u32(val, addr + 4, env);
710 env->exception.vaddress = addr + 4;
717 env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
/* AArch32 main emulation loop: runs the guest via cpu_arm_exec() and handles
 * FPA emulation (EmulateAll), SWI/BKPT syscall decoding for both OABI and
 * EABI, semihosting, commpage traps, exclusive stores, aborts and debug
 * exceptions.  NOTE(review): many lines (case labels, breaks, closing
 * braces, do_syscall argument lists) are elided from this listing. */
723 void cpu_loop(CPUARMState *env)
725 CPUState *cs = CPU(arm_env_get_cpu(env));
727 unsigned int n, insn;
728 target_siginfo_t info;
734 trapnr = cpu_arm_exec(cs);
739 TaskState *ts = cs->opaque;
/* Undefined-instruction path: try FPA11 floating-point emulation first. */
743 /* we handle the FPU emulation here, as Linux */
744 /* we get the opcode */
745 /* FIXME - what to do if get_user() fails? */
746 get_user_code_u32(opcode, env->regs[15], env);
748 rc = EmulateAll(opcode, &ts->fpa, env);
749 if (rc == 0) { /* illegal instruction */
750 info.si_signo = TARGET_SIGILL;
752 info.si_code = TARGET_ILL_ILLOPN;
753 info._sifields._sigfault._addr = env->regs[15];
754 queue_signal(env, info.si_signo, &info);
755 } else if (rc < 0) { /* FP exception */
758 /* translate softfloat flags to FPSR flags */
759 if (-rc & float_flag_invalid)
761 if (-rc & float_flag_divbyzero)
763 if (-rc & float_flag_overflow)
765 if (-rc & float_flag_underflow)
767 if (-rc & float_flag_inexact)
770 FPSR fpsr = ts->fpa.fpsr;
771 //printf("fpsr 0x%x, arm_fpe 0x%x\n",fpsr,arm_fpe);
773 if (fpsr & (arm_fpe << 16)) { /* exception enabled? */
774 info.si_signo = TARGET_SIGFPE;
777 /* ordered by priority, least first */
778 if (arm_fpe & BIT_IXC) info.si_code = TARGET_FPE_FLTRES;
779 if (arm_fpe & BIT_UFC) info.si_code = TARGET_FPE_FLTUND;
780 if (arm_fpe & BIT_OFC) info.si_code = TARGET_FPE_FLTOVF;
781 if (arm_fpe & BIT_DZC) info.si_code = TARGET_FPE_FLTDIV;
782 if (arm_fpe & BIT_IOC) info.si_code = TARGET_FPE_FLTINV;
784 info._sifields._sigfault._addr = env->regs[15];
785 queue_signal(env, info.si_signo, &info);
790 /* accumulate unenabled exceptions */
791 if ((!(fpsr & BIT_IXE)) && (arm_fpe & BIT_IXC))
793 if ((!(fpsr & BIT_UFE)) && (arm_fpe & BIT_UFC))
795 if ((!(fpsr & BIT_OFE)) && (arm_fpe & BIT_OFC))
797 if ((!(fpsr & BIT_DZE)) && (arm_fpe & BIT_DZC))
799 if ((!(fpsr & BIT_IOE)) && (arm_fpe & BIT_IOC))
802 } else { /* everything OK */
/* SWI/BKPT: extract the syscall number from the trapping instruction,
 * Thumb (16-bit) vs ARM (32-bit) encodings handled separately. */
813 if (trapnr == EXCP_BKPT) {
815 /* FIXME - what to do if get_user() fails? */
816 get_user_code_u16(insn, env->regs[15], env);
820 /* FIXME - what to do if get_user() fails? */
821 get_user_code_u32(insn, env->regs[15], env);
822 n = (insn & 0xf) | ((insn >> 4) & 0xff0);
827 /* FIXME - what to do if get_user() fails? */
828 get_user_code_u16(insn, env->regs[15] - 2, env);
831 /* FIXME - what to do if get_user() fails? */
832 get_user_code_u32(insn, env->regs[15] - 4, env);
837 if (n == ARM_NR_cacheflush) {
839 } else if (n == ARM_NR_semihosting
840 || n == ARM_NR_thumb_semihosting) {
841 env->regs[0] = do_arm_semihosting (env);
842 } else if (n == 0 || n >= ARM_SYSCALL_BASE || env->thumb) {
/* EABI (and Thumb): syscall number lives in r7, hence n == 0 here. */
844 if (env->thumb || n == 0) {
847 n -= ARM_SYSCALL_BASE;
/* ARM-private syscalls above ARM_NR_BASE (cacheflush, set_tls, ...). */
850 if ( n > ARM_NR_BASE) {
852 case ARM_NR_cacheflush:
856 cpu_set_tls(env, env->regs[0]);
859 case ARM_NR_breakpoint:
860 env->regs[15] -= env->thumb ? 2 : 4;
863 gemu_log("qemu: Unsupported ARM syscall: 0x%x\n",
865 env->regs[0] = -TARGET_ENOSYS;
869 ret = do_syscall(env,
/* Restart: back the PC up over the SWI so it re-executes. */
878 if (ret == -TARGET_ERESTARTSYS) {
879 env->regs[15] -= env->thumb ? 2 : 4;
880 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
890 /* just indicate that signals should be handled asap */
893 if (!do_strex(env)) {
896 /* fall through for segv */
897 case EXCP_PREFETCH_ABORT:
898 case EXCP_DATA_ABORT:
899 addr = env->exception.vaddress;
901 info.si_signo = TARGET_SIGSEGV;
903 /* XXX: check env->error_code */
904 info.si_code = TARGET_SEGV_MAPERR;
905 info._sifields._sigfault._addr = addr;
906 queue_signal(env, info.si_signo, &info);
914 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
919 info.si_code = TARGET_TRAP_BRKPT;
920 queue_signal(env, info.si_signo, &info);
924 case EXCP_KERNEL_TRAP:
925 if (do_kernel_trap(env))
929 /* nothing to do here for user-mode, just resume guest code */
933 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
936 process_pending_signals(env);
943 * Handle AArch64 store-release exclusive
945 * rs = gets the status result of store exclusive
946 * rt = is the register that is stored
947 * rt2 = is the second register store (in STP)
/* Completes an AArch64 STXR/STXP: unpacks size/is_pair/rs/rt/rt2 from
 * exclusive_info (layout documented inline below), re-reads the monitored
 * location(s), compares with exclusive_val/exclusive_high, and stores only
 * on a match.  Returns segv status; always clears the exclusive monitor. */
950 static int do_strex_a64(CPUARMState *env)
961 /* size | is_pair << 2 | (rs << 4) | (rt << 9) | (rt2 << 14)); */
962 size = extract32(env->exclusive_info, 0, 2);
963 is_pair = extract32(env->exclusive_info, 2, 1);
964 rs = extract32(env->exclusive_info, 4, 5);
965 rt = extract32(env->exclusive_info, 9, 5);
966 rt2 = extract32(env->exclusive_info, 14, 5);
968 addr = env->exclusive_addr;
970 if (addr != env->exclusive_test) {
976 segv = get_user_u8(val, addr);
979 segv = get_user_u16(val, addr);
982 segv = get_user_u32(val, addr);
985 segv = get_user_u64(val, addr);
991 env->exception.vaddress = addr;
994 if (val != env->exclusive_val) {
/* Pair case: second word is 4 or 8 bytes further depending on size. */
999 segv = get_user_u32(val, addr + 4);
1001 segv = get_user_u64(val, addr + 8);
1004 env->exception.vaddress = addr + (size == 2 ? 4 : 8);
1007 if (val != env->exclusive_high) {
1011 /* handle the zero register */
1012 val = rt == 31 ? 0 : env->xregs[rt];
1015 segv = put_user_u8(val, addr);
1018 segv = put_user_u16(val, addr);
1021 segv = put_user_u32(val, addr);
1024 segv = put_user_u64(val, addr);
1031 /* handle the zero register */
1032 val = rt2 == 31 ? 0 : env->xregs[rt2];
1034 segv = put_user_u32(val, addr + 4);
1036 segv = put_user_u64(val, addr + 8);
1039 env->exception.vaddress = addr + (size == 2 ? 4 : 8);
1046 /* rs == 31 encodes a write to the ZR, thus throwing away
1047 * the status return. This is rather silly but valid.
1050 env->xregs[rs] = rc;
1053 /* instruction faulted, PC does not advance */
1054 /* either way a strex releases any exclusive lock we have */
1055 env->exclusive_addr = -1;
1060 /* AArch64 main loop */
/* AArch64 variant: same trap-dispatch structure as the 32-bit loop but with
 * x-registers, no commpage, and a monitor clear on every iteration.
 * NOTE(review): case labels, breaks and closing braces are elided here. */
1061 void cpu_loop(CPUARMState *env)
1063 CPUState *cs = CPU(arm_env_get_cpu(env));
1066 target_siginfo_t info;
1070 trapnr = cpu_arm_exec(cs);
1075 ret = do_syscall(env,
/* ERESTARTSYS leaves x0 alone for restart; ESIGRETURN means sigreturn
 * already installed the register state. */
1084 if (ret == -TARGET_ERESTARTSYS) {
1086 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
1087 env->xregs[0] = ret;
1090 case EXCP_INTERRUPT:
1091 /* just indicate that signals should be handled asap */
1094 info.si_signo = TARGET_SIGILL;
1096 info.si_code = TARGET_ILL_ILLOPN;
1097 info._sifields._sigfault._addr = env->pc;
1098 queue_signal(env, info.si_signo, &info);
1101 if (!do_strex_a64(env)) {
1104 /* fall through for segv */
1105 case EXCP_PREFETCH_ABORT:
1106 case EXCP_DATA_ABORT:
1107 info.si_signo = TARGET_SIGSEGV;
1109 /* XXX: check env->error_code */
1110 info.si_code = TARGET_SEGV_MAPERR;
1111 info._sifields._sigfault._addr = env->exception.vaddress;
1112 queue_signal(env, info.si_signo, &info);
1116 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1118 info.si_signo = sig;
1120 info.si_code = TARGET_TRAP_BRKPT;
1121 queue_signal(env, info.si_signo, &info);
1125 env->xregs[0] = do_arm_semihosting(env);
1128 /* nothing to do here for user-mode, just resume guest code */
1131 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
1134 process_pending_signals(env);
1135 /* Exception return on AArch64 always clears the exclusive monitor,
1136 * so any return to running guest code implies this.
1137 * A strex (successful or otherwise) also clears the monitor, so
1138 * we don't need to specialcase EXCP_STREX.
1140 env->exclusive_addr = -1;
1147 #ifdef TARGET_UNICORE32
1149 void cpu_loop(CPUUniCore32State *env)
1151 CPUState *cs = CPU(uc32_env_get_cpu(env));
1153 unsigned int n, insn;
1154 target_siginfo_t info;
1158 trapnr = uc32_cpu_exec(cs);
1161 case UC32_EXCP_PRIV:
1164 get_user_u32(insn, env->regs[31] - 4);
1165 n = insn & 0xffffff;
1167 if (n >= UC32_SYSCALL_BASE) {
1169 n -= UC32_SYSCALL_BASE;
1170 if (n == UC32_SYSCALL_NR_set_tls) {
1171 cpu_set_tls(env, env->regs[0]);
1174 env->regs[0] = do_syscall(env,
1189 case UC32_EXCP_DTRAP:
1190 case UC32_EXCP_ITRAP:
1191 info.si_signo = TARGET_SIGSEGV;
1193 /* XXX: check env->error_code */
1194 info.si_code = TARGET_SEGV_MAPERR;
1195 info._sifields._sigfault._addr = env->cp0.c4_faultaddr;
1196 queue_signal(env, info.si_signo, &info);
1198 case EXCP_INTERRUPT:
1199 /* just indicate that signals should be handled asap */
1205 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1207 info.si_signo = sig;
1209 info.si_code = TARGET_TRAP_BRKPT;
1210 queue_signal(env, info.si_signo, &info);
1217 process_pending_signals(env);
1221 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
/* SPARC64 v9 ABI stack bias applied to %sp before spilling/filling windows. */
1227 #define SPARC64_STACK_BIAS 2047
1231 /* WARNING: dealing with register windows _is_ complicated. More info
1232 can be found at http://www.sics.se/~psm/sparcstack.html */
/* Map (window, register) to an index into env->regbase, modulo the number of
 * windows, with the documented wrap adjustment for the last window. */
1233 static inline int get_reg_index(CPUSPARCState *env, int cwp, int index)
1235 index = (index + cwp * 16) % (16 * env->nwindows);
1236 /* wrap handling : if cwp is on the last window, then we use the
1237 registers 'after' the end */
1238 if (index < 8 && env->cwp == env->nwindows - 1)
1239 index += 16 * env->nwindows;
1243 /* save the register window 'cwp1' */
/* Spill the 16 local/in registers of window cwp1 to the guest stack at that
 * window's %sp (register 6), applying the SPARC64 stack bias when needed. */
1244 static inline void save_window_offset(CPUSPARCState *env, int cwp1)
1249 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
1250 #ifdef TARGET_SPARC64
1252 sp_ptr += SPARC64_STACK_BIAS;
1254 #if defined(DEBUG_WIN)
1255 printf("win_overflow: sp_ptr=0x" TARGET_ABI_FMT_lx " save_cwp=%d\n",
1258 for(i = 0; i < 16; i++) {
1259 /* FIXME - what to do if put_user() fails? */
1260 put_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
1261 sp_ptr += sizeof(abi_ulong);
/* Window-overflow trap handler: spill one window.  On 32-bit SPARC this also
 * rotates the window-invalid mask (wim); on SPARC64 the equivalent cansave/
 * canrestore bookkeeping is elided from this listing. */
1265 static void save_window(CPUSPARCState *env)
1267 #ifndef TARGET_SPARC64
1268 unsigned int new_wim;
1269 new_wim = ((env->wim >> 1) | (env->wim << (env->nwindows - 1))) &
1270 ((1LL << env->nwindows) - 1);
1271 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
1274 save_window_offset(env, cpu_cwp_dec(env, env->cwp - 2));
/* Window-underflow trap handler: reload the 16 registers of the invalid
 * window from the guest stack at that window's %sp, rotating wim (32-bit)
 * or adjusting canrestore/cleanwin (SPARC64, partially elided here). */
1280 static void restore_window(CPUSPARCState *env)
1282 #ifndef TARGET_SPARC64
1283 unsigned int new_wim;
1285 unsigned int i, cwp1;
1288 #ifndef TARGET_SPARC64
1289 new_wim = ((env->wim << 1) | (env->wim >> (env->nwindows - 1))) &
1290 ((1LL << env->nwindows) - 1);
1293 /* restore the invalid window */
1294 cwp1 = cpu_cwp_inc(env, env->cwp + 1);
1295 sp_ptr = env->regbase[get_reg_index(env, cwp1, 6)];
1296 #ifdef TARGET_SPARC64
1298 sp_ptr += SPARC64_STACK_BIAS;
1300 #if defined(DEBUG_WIN)
1301 printf("win_underflow: sp_ptr=0x" TARGET_ABI_FMT_lx " load_cwp=%d\n",
1304 for(i = 0; i < 16; i++) {
1305 /* FIXME - what to do if get_user() fails? */
1306 get_user_ual(env->regbase[get_reg_index(env, cwp1, 8 + i)], sp_ptr);
1307 sp_ptr += sizeof(abi_ulong);
1309 #ifdef TARGET_SPARC64
1311 if (env->cleanwin < env->nwindows - 1)
/* Implements the "flush windows" trap (ta 0x83 path in cpu_loop): spill
 * every in-use window to the stack until a restore would trigger a real
 * underflow, then mark all windows invalid so subsequent restores refill
 * from memory.  Several loop-control lines are elided from this listing. */
1319 static void flush_windows(CPUSPARCState *env)
1325 /* if restore would invoke restore_window(), then we can stop */
1326 cwp1 = cpu_cwp_inc(env, env->cwp + offset);
1327 #ifndef TARGET_SPARC64
1328 if (env->wim & (1 << cwp1))
1331 if (env->canrestore == 0)
1336 save_window_offset(env, cwp1);
1339 cwp1 = cpu_cwp_inc(env, env->cwp + 1);
1340 #ifndef TARGET_SPARC64
1341 /* set wim so that restore will reload the registers */
1342 env->wim = 1 << cwp1;
1344 #if defined(DEBUG_WIN)
1345 printf("flush_windows: nb=%d\n", offset - 1);
/* SPARC main emulation loop: syscall traps (with the SPARC convention of
 * reporting errors via the PSR/XCC carry flag rather than negative return
 * values), window overflow/underflow traps, faults, and gdbstub stops.
 * NOTE(review): case labels, breaks and closing braces are elided here. */
1349 void cpu_loop (CPUSPARCState *env)
1351 CPUState *cs = CPU(sparc_env_get_cpu(env));
1354 target_siginfo_t info;
1358 trapnr = cpu_sparc_exec(cs);
1361 /* Compute PSR before exposing state. */
1362 if (env->cc_op != CC_OP_FLAGS) {
1367 #ifndef TARGET_SPARC64
/* Syscall number in %g1, arguments in %o0-%o5 (regwptr[0..5]). */
1374 ret = do_syscall (env, env->gregs[1],
1375 env->regwptr[0], env->regwptr[1],
1376 env->regwptr[2], env->regwptr[3],
1377 env->regwptr[4], env->regwptr[5],
1379 if (ret == -TARGET_ERESTARTSYS || ret == -TARGET_QEMU_ESIGRETURN) {
/* Errors are values in [-515, -1]: set carry and return the positive
 * errno; success clears carry. */
1382 if ((abi_ulong)ret >= (abi_ulong)(-515)) {
1383 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1384 env->xcc |= PSR_CARRY;
1386 env->psr |= PSR_CARRY;
1390 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
1391 env->xcc &= ~PSR_CARRY;
1393 env->psr &= ~PSR_CARRY;
1396 env->regwptr[0] = ret;
1397 /* next instruction */
1399 env->npc = env->npc + 4;
1401 case 0x83: /* flush windows */
1406 /* next instruction */
1408 env->npc = env->npc + 4;
1410 #ifndef TARGET_SPARC64
1411 case TT_WIN_OVF: /* window overflow */
1414 case TT_WIN_UNF: /* window underflow */
1415 restore_window(env);
1420 info.si_signo = TARGET_SIGSEGV;
1422 /* XXX: check env->error_code */
1423 info.si_code = TARGET_SEGV_MAPERR;
1424 info._sifields._sigfault._addr = env->mmuregs[4];
1425 queue_signal(env, info.si_signo, &info);
/* SPARC64 names for the same window traps. */
1429 case TT_SPILL: /* window overflow */
1432 case TT_FILL: /* window underflow */
1433 restore_window(env);
1438 info.si_signo = TARGET_SIGSEGV;
1440 /* XXX: check env->error_code */
1441 info.si_code = TARGET_SEGV_MAPERR;
1442 if (trapnr == TT_DFAULT)
1443 info._sifields._sigfault._addr = env->dmmuregs[4];
1445 info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
1446 queue_signal(env, info.si_signo, &info);
1449 #ifndef TARGET_ABI32
1452 sparc64_get_context(env);
1456 sparc64_set_context(env);
1460 case EXCP_INTERRUPT:
1461 /* just indicate that signals should be handled asap */
1465 info.si_signo = TARGET_SIGILL;
1467 info.si_code = TARGET_ILL_ILLOPC;
1468 info._sifields._sigfault._addr = env->pc;
1469 queue_signal(env, info.si_signo, &info);
1476 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
1479 info.si_signo = sig;
1481 info.si_code = TARGET_TRAP_BRKPT;
1482 queue_signal(env, info.si_signo, &info);
1487 printf ("Unhandled trap: 0x%x\n", trapnr);
1488 cpu_dump_state(cs, stderr, fprintf, 0);
1491 process_pending_signals (env);
/* PowerPC timebase: all guest TB/ATB reads are backed by the host tick
 * counter; the *_tbu/_atbu variants return the upper 32 bits.  The 601 RTC
 * upper register is aliased to cpu_ppc_load_tbu; the RTC lower register
 * masks to the 601's documented bit layout. */
1498 static inline uint64_t cpu_ppc_get_tb(CPUPPCState *env)
1500 return cpu_get_host_ticks();
1503 uint64_t cpu_ppc_load_tbl(CPUPPCState *env)
1505 return cpu_ppc_get_tb(env);
1508 uint32_t cpu_ppc_load_tbu(CPUPPCState *env)
1510 return cpu_ppc_get_tb(env) >> 32;
1513 uint64_t cpu_ppc_load_atbl(CPUPPCState *env)
1515 return cpu_ppc_get_tb(env);
1518 uint32_t cpu_ppc_load_atbu(CPUPPCState *env)
1520 return cpu_ppc_get_tb(env) >> 32;
1523 uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env)
1524 __attribute__ (( alias ("cpu_ppc_load_tbu") ));
1526 uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env)
1528 return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
/* DCR (device control register) accessors are stubs in user mode. */
1531 /* XXX: to be fixed */
1532 int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1537 int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
/* PowerPC stwcx./stdcx. (and the quadword stqcx. variant): verify the page
 * is still readable, re-read the reserved location, compare with the value
 * saved at load-reserved time, and store conditionally.  CR0 receives the
 * EQ result (shifted) together with XER.SO; the reservation is always
 * cleared.  NOTE(review): several case labels, the start/end exclusive
 * bracketing and closing braces are elided from this listing. */
1542 static int do_store_exclusive(CPUPPCState *env)
1545 target_ulong page_addr;
1546 target_ulong val, val2 __attribute__((unused)) = 0;
1550 addr = env->reserve_ea;
1551 page_addr = addr & TARGET_PAGE_MASK;
1554 flags = page_get_flags(page_addr);
1555 if ((flags & PAGE_READ) == 0) {
/* reserve_info packs the source register (low 5 bits) and access size. */
1558 int reg = env->reserve_info & 0x1f;
1559 int size = env->reserve_info >> 5;
1562 if (addr == env->reserve_addr) {
1564 case 1: segv = get_user_u8(val, addr); break;
1565 case 2: segv = get_user_u16(val, addr); break;
1566 case 4: segv = get_user_u32(val, addr); break;
1567 #if defined(TARGET_PPC64)
1568 case 8: segv = get_user_u64(val, addr); break;
/* 16-byte (quadword) reservation: two 64-bit halves. */
1570 segv = get_user_u64(val, addr);
1572 segv = get_user_u64(val2, addr + 8);
1579 if (!segv && val == env->reserve_val) {
1580 val = env->gpr[reg];
1582 case 1: segv = put_user_u8(val, addr); break;
1583 case 2: segv = put_user_u16(val, addr); break;
1584 case 4: segv = put_user_u32(val, addr); break;
1585 #if defined(TARGET_PPC64)
1586 case 8: segv = put_user_u64(val, addr); break;
1588 if (val2 == env->reserve_val2) {
1591 val = env->gpr[reg+1];
1593 val2 = env->gpr[reg+1];
1595 segv = put_user_u64(val, addr);
1597 segv = put_user_u64(val2, addr + 8);
1610 env->crf[0] = (stored << 1) | xer_so;
1611 env->reserve_addr = (target_ulong)-1;
1621 void cpu_loop(CPUPPCState *env)
1623 CPUState *cs = CPU(ppc_env_get_cpu(env));
1624 target_siginfo_t info;
1630 trapnr = cpu_ppc_exec(cs);
1633 case POWERPC_EXCP_NONE:
1636 case POWERPC_EXCP_CRITICAL: /* Critical input */
1637 cpu_abort(cs, "Critical interrupt while in user mode. "
/*
 * PowerPC cpu_loop interior: map CPU exceptions raised during user-mode
 * emulation onto target signals (queue_signal) or abort (cpu_abort) for
 * traps that cannot legitimately reach user mode.
 * NOTE(review): this is an elided extraction -- the function header, many
 * break statements and closing braces are not visible here. Only comments
 * were added; every code line is unchanged.
 */
1640 case POWERPC_EXCP_MCHECK: /* Machine check exception */
1641 cpu_abort(cs, "Machine check exception while in user mode. "
1644 case POWERPC_EXCP_DSI: /* Data storage exception */
1645 EXCP_DUMP(env, "Invalid data memory access: 0x" TARGET_FMT_lx "\n",
1647 /* XXX: check this. Seems bugged */
/* Dispatch on the DSISR-style cause bits in the top byte of error_code. */
1648 switch (env->error_code & 0xFF000000) {
1650 info.si_signo = TARGET_SIGSEGV;
1652 info.si_code = TARGET_SEGV_MAPERR;
1655 info.si_signo = TARGET_SIGILL;
1657 info.si_code = TARGET_ILL_ILLADR;
1660 info.si_signo = TARGET_SIGSEGV;
1662 info.si_code = TARGET_SEGV_ACCERR;
1665 /* Let's send a regular segfault... */
1666 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
1668 info.si_signo = TARGET_SIGSEGV;
1670 info.si_code = TARGET_SEGV_MAPERR;
1673 info._sifields._sigfault._addr = env->nip;
1674 queue_signal(env, info.si_signo, &info);
1676 case POWERPC_EXCP_ISI: /* Instruction storage exception */
/* NOTE(review): "0x\n" puts the newline BEFORE the address -- almost
 * certainly meant "0x" TARGET_FMT_lx "\n". String left as-is here
 * because a doc-only pass must not change runtime strings. */
1677 EXCP_DUMP(env, "Invalid instruction fetch: 0x\n" TARGET_FMT_lx
1678 "\n", env->spr[SPR_SRR0]);
1679 /* XXX: check this */
1680 switch (env->error_code & 0xFF000000) {
1682 info.si_signo = TARGET_SIGSEGV;
1684 info.si_code = TARGET_SEGV_MAPERR;
1688 info.si_signo = TARGET_SIGSEGV;
1690 info.si_code = TARGET_SEGV_ACCERR;
1693 /* Let's send a regular segfault... */
1694 EXCP_DUMP(env, "Invalid segfault errno (%02x)\n",
1696 info.si_signo = TARGET_SIGSEGV;
1698 info.si_code = TARGET_SEGV_MAPERR;
/* nip - 4: presumably nip already points past the faulting insn here --
 * TODO confirm against the translator's PC-advance convention. */
1701 info._sifields._sigfault._addr = env->nip - 4;
1702 queue_signal(env, info.si_signo, &info);
1704 case POWERPC_EXCP_EXTERNAL: /* External input */
1705 cpu_abort(cs, "External interrupt while in user mode. "
1708 case POWERPC_EXCP_ALIGN: /* Alignment exception */
1709 EXCP_DUMP(env, "Unaligned memory access\n");
1710 /* XXX: check this */
1711 info.si_signo = TARGET_SIGBUS;
1713 info.si_code = TARGET_BUS_ADRALN;
1714 info._sifields._sigfault._addr = env->nip;
1715 queue_signal(env, info.si_signo, &info);
1717 case POWERPC_EXCP_PROGRAM: /* Program exception */
1718 /* XXX: check this */
/* High bits select the program-exception class; low nibble refines it. */
1719 switch (env->error_code & ~0xF) {
1720 case POWERPC_EXCP_FP:
1721 EXCP_DUMP(env, "Floating point program exception\n");
1722 info.si_signo = TARGET_SIGFPE;
1724 switch (env->error_code & 0xF) {
1725 case POWERPC_EXCP_FP_OX:
1726 info.si_code = TARGET_FPE_FLTOVF;
1728 case POWERPC_EXCP_FP_UX:
1729 info.si_code = TARGET_FPE_FLTUND;
1731 case POWERPC_EXCP_FP_ZX:
1732 case POWERPC_EXCP_FP_VXZDZ:
1733 info.si_code = TARGET_FPE_FLTDIV;
1735 case POWERPC_EXCP_FP_XX:
1736 info.si_code = TARGET_FPE_FLTRES;
1738 case POWERPC_EXCP_FP_VXSOFT:
1739 info.si_code = TARGET_FPE_FLTINV;
/* All remaining invalid-operation variants collapse to FLTSUB. */
1741 case POWERPC_EXCP_FP_VXSNAN:
1742 case POWERPC_EXCP_FP_VXISI:
1743 case POWERPC_EXCP_FP_VXIDI:
1744 case POWERPC_EXCP_FP_VXIMZ:
1745 case POWERPC_EXCP_FP_VXVC:
1746 case POWERPC_EXCP_FP_VXSQRT:
1747 case POWERPC_EXCP_FP_VXCVI:
1748 info.si_code = TARGET_FPE_FLTSUB;
1751 EXCP_DUMP(env, "Unknown floating point exception (%02x)\n",
1756 case POWERPC_EXCP_INVAL:
1757 EXCP_DUMP(env, "Invalid instruction\n");
1758 info.si_signo = TARGET_SIGILL;
1760 switch (env->error_code & 0xF) {
1761 case POWERPC_EXCP_INVAL_INVAL:
1762 info.si_code = TARGET_ILL_ILLOPC;
1764 case POWERPC_EXCP_INVAL_LSWX:
1765 info.si_code = TARGET_ILL_ILLOPN;
1767 case POWERPC_EXCP_INVAL_SPR:
1768 info.si_code = TARGET_ILL_PRVREG;
1770 case POWERPC_EXCP_INVAL_FP:
1771 info.si_code = TARGET_ILL_COPROC;
1774 EXCP_DUMP(env, "Unknown invalid operation (%02x)\n",
1775 env->error_code & 0xF);
1776 info.si_code = TARGET_ILL_ILLADR;
1780 case POWERPC_EXCP_PRIV:
1781 EXCP_DUMP(env, "Privilege violation\n");
1782 info.si_signo = TARGET_SIGILL;
1784 switch (env->error_code & 0xF) {
1785 case POWERPC_EXCP_PRIV_OPC:
1786 info.si_code = TARGET_ILL_PRVOPC;
1788 case POWERPC_EXCP_PRIV_REG:
1789 info.si_code = TARGET_ILL_PRVREG;
1792 EXCP_DUMP(env, "Unknown privilege violation (%02x)\n",
1793 env->error_code & 0xF);
1794 info.si_code = TARGET_ILL_PRVOPC;
1798 case POWERPC_EXCP_TRAP:
1799 cpu_abort(cs, "Tried to call a TRAP\n");
1802 /* Should not happen ! */
1803 cpu_abort(cs, "Unknown program exception (%02x)\n",
1807 info._sifields._sigfault._addr = env->nip - 4;
1808 queue_signal(env, info.si_signo, &info);
1810 case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
1811 EXCP_DUMP(env, "No floating point allowed\n");
1812 info.si_signo = TARGET_SIGILL;
1814 info.si_code = TARGET_ILL_COPROC;
1815 info._sifields._sigfault._addr = env->nip - 4;
1816 queue_signal(env, info.si_signo, &info);
1818 case POWERPC_EXCP_SYSCALL: /* System call exception */
1819 cpu_abort(cs, "Syscall exception while in user mode. "
1822 case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
1823 EXCP_DUMP(env, "No APU instruction allowed\n");
1824 info.si_signo = TARGET_SIGILL;
1826 info.si_code = TARGET_ILL_COPROC;
1827 info._sifields._sigfault._addr = env->nip - 4;
1828 queue_signal(env, info.si_signo, &info);
1830 case POWERPC_EXCP_DECR: /* Decrementer exception */
1831 cpu_abort(cs, "Decrementer interrupt while in user mode. "
1834 case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
1835 cpu_abort(cs, "Fix interval timer interrupt while in user mode. "
1838 case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
1839 cpu_abort(cs, "Watchdog timer interrupt while in user mode. "
1842 case POWERPC_EXCP_DTLB: /* Data TLB error */
1843 cpu_abort(cs, "Data TLB exception while in user mode. "
1846 case POWERPC_EXCP_ITLB: /* Instruction TLB error */
1847 cpu_abort(cs, "Instruction TLB exception while in user mode. "
1850 case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavail. */
1851 EXCP_DUMP(env, "No SPE/floating-point instruction allowed\n");
1852 info.si_signo = TARGET_SIGILL;
1854 info.si_code = TARGET_ILL_COPROC;
1855 info._sifields._sigfault._addr = env->nip - 4;
1856 queue_signal(env, info.si_signo, &info);
1858 case POWERPC_EXCP_EFPDI: /* Embedded floating-point data IRQ */
1859 cpu_abort(cs, "Embedded floating-point data IRQ not handled\n");
1861 case POWERPC_EXCP_EFPRI: /* Embedded floating-point round IRQ */
1862 cpu_abort(cs, "Embedded floating-point round IRQ not handled\n");
1864 case POWERPC_EXCP_EPERFM: /* Embedded performance monitor IRQ */
1865 cpu_abort(cs, "Performance monitor exception not handled\n");
1867 case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
1868 cpu_abort(cs, "Doorbell interrupt while in user mode. "
1871 case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
1872 cpu_abort(cs, "Doorbell critical interrupt while in user mode. "
1875 case POWERPC_EXCP_RESET: /* System reset exception */
1876 cpu_abort(cs, "Reset interrupt while in user mode. "
1879 case POWERPC_EXCP_DSEG: /* Data segment exception */
1880 cpu_abort(cs, "Data segment exception while in user mode. "
1883 case POWERPC_EXCP_ISEG: /* Instruction segment exception */
1884 cpu_abort(cs, "Instruction segment exception "
1885 "while in user mode. Aborting\n");
1887 /* PowerPC 64 with hypervisor mode support */
1888 case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
1889 cpu_abort(cs, "Hypervisor decrementer interrupt "
1890 "while in user mode. Aborting\n");
1892 case POWERPC_EXCP_TRACE: /* Trace exception */
1894 * we use this exception to emulate step-by-step execution mode.
1897 /* PowerPC 64 with hypervisor mode support */
1898 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
1899 cpu_abort(cs, "Hypervisor data storage exception "
1900 "while in user mode. Aborting\n");
1902 case POWERPC_EXCP_HISI: /* Hypervisor instruction storage excp */
1903 cpu_abort(cs, "Hypervisor instruction storage exception "
1904 "while in user mode. Aborting\n");
1906 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
1907 cpu_abort(cs, "Hypervisor data segment exception "
1908 "while in user mode. Aborting\n");
1910 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment excp */
1911 cpu_abort(cs, "Hypervisor instruction segment exception "
1912 "while in user mode. Aborting\n");
1914 case POWERPC_EXCP_VPU: /* Vector unavailable exception */
1915 EXCP_DUMP(env, "No Altivec instructions allowed\n");
1916 info.si_signo = TARGET_SIGILL;
1918 info.si_code = TARGET_ILL_COPROC;
1919 info._sifields._sigfault._addr = env->nip - 4;
1920 queue_signal(env, info.si_signo, &info);
1922 case POWERPC_EXCP_PIT: /* Programmable interval timer IRQ */
1923 cpu_abort(cs, "Programmable interval timer interrupt "
1924 "while in user mode. Aborting\n");
1926 case POWERPC_EXCP_IO: /* IO error exception */
1927 cpu_abort(cs, "IO error exception while in user mode. "
1930 case POWERPC_EXCP_RUNM: /* Run mode exception */
1931 cpu_abort(cs, "Run mode exception while in user mode. "
1934 case POWERPC_EXCP_EMUL: /* Emulation trap exception */
1935 cpu_abort(cs, "Emulation trap exception not handled\n");
1937 case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
1938 cpu_abort(cs, "Instruction fetch TLB exception "
1939 "while in user-mode. Aborting");
1941 case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
1942 cpu_abort(cs, "Data load TLB exception while in user-mode. "
1945 case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
1946 cpu_abort(cs, "Data store TLB exception while in user-mode. "
1949 case POWERPC_EXCP_FPA: /* Floating-point assist exception */
1950 cpu_abort(cs, "Floating-point assist exception not handled\n");
1952 case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
1953 cpu_abort(cs, "Instruction address breakpoint exception "
1956 case POWERPC_EXCP_SMI: /* System management interrupt */
1957 cpu_abort(cs, "System management interrupt while in user mode. "
1960 case POWERPC_EXCP_THERM: /* Thermal interrupt */
1961 cpu_abort(cs, "Thermal interrupt interrupt while in user mode. "
1964 case POWERPC_EXCP_PERFM: /* Embedded performance monitor IRQ */
1965 cpu_abort(cs, "Performance monitor exception not handled\n");
1967 case POWERPC_EXCP_VPUA: /* Vector assist exception */
1968 cpu_abort(cs, "Vector assist exception not handled\n");
1970 case POWERPC_EXCP_SOFTP: /* Soft patch exception */
1971 cpu_abort(cs, "Soft patch exception not handled\n");
1973 case POWERPC_EXCP_MAINT: /* Maintenance exception */
1974 cpu_abort(cs, "Maintenance exception while in user mode. "
1977 case POWERPC_EXCP_STOP: /* stop translation */
1978 /* We did invalidate the instruction cache. Go on */
1980 case POWERPC_EXCP_BRANCH: /* branch instruction: */
1981 /* We just stopped because of a branch. Go on */
1983 case POWERPC_EXCP_SYSCALL_USER:
1984 /* system call in user-mode emulation */
1986 * PPC ABI uses overflow flag in cr0 to signal an error
/* Clear CR0.SO (success) before dispatching; set again on error below. */
1989 env->crf[0] &= ~0x1;
1990 ret = do_syscall(env, env->gpr[0], env->gpr[3], env->gpr[4],
1991 env->gpr[5], env->gpr[6], env->gpr[7],
1993 if (ret == -TARGET_ERESTARTSYS) {
1997 if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
1998 /* Returning from a successful sigreturn syscall.
1999 Avoid corrupting register state. */
/* Returns in the top "errno page" (> -515 as unsigned) are errors --
 * presumably mirroring the kernel's error-value window; confirm. */
2002 if (ret > (target_ulong)(-515)) {
2008 case POWERPC_EXCP_STCX:
/* stwcx./stdcx. emulated out of line; non-zero means the store faulted. */
2009 if (do_store_exclusive(env)) {
2010 info.si_signo = TARGET_SIGSEGV;
2012 info.si_code = TARGET_SEGV_MAPERR;
2013 info._sifields._sigfault._addr = env->nip;
2014 queue_signal(env, info.si_signo, &info);
2021 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2023 info.si_signo = sig;
2025 info.si_code = TARGET_TRAP_BRKPT;
2026 queue_signal(env, info.si_signo, &info);
2030 case EXCP_INTERRUPT:
2031 /* just indicate that signals should be handled asap */
/* NOTE(review): "0x%d" prints a DECIMAL value with a hex prefix;
 * likely meant "0x%x". Left unchanged (runtime string). */
2034 cpu_abort(cs, "Unknown exception 0x%d. Aborting\n", trapnr);
2037 process_pending_signals(env);
/*
 * O32-only table mapping MIPS syscall numbers (rebased at 4000, i.e. the
 * index is gpr[2] - 4000) to the number of arguments each call takes.
 * Entries > 4 tell the O32 syscall path how many extra arguments to read
 * from the user stack. MIPS_SYS expands to just the count; the symbolic
 * names are documentation only.
 * NOTE(review): the closing "};" / #undef of this table fall outside the
 * visible extraction; table entries below are unchanged.
 */
2044 # ifdef TARGET_ABI_MIPSO32
2045 # define MIPS_SYS(name, args) args,
2046 static const uint8_t mips_syscall_args[] = {
2047 MIPS_SYS(sys_syscall , 8) /* 4000 */
2048 MIPS_SYS(sys_exit , 1)
2049 MIPS_SYS(sys_fork , 0)
2050 MIPS_SYS(sys_read , 3)
2051 MIPS_SYS(sys_write , 3)
2052 MIPS_SYS(sys_open , 3) /* 4005 */
2053 MIPS_SYS(sys_close , 1)
2054 MIPS_SYS(sys_waitpid , 3)
2055 MIPS_SYS(sys_creat , 2)
2056 MIPS_SYS(sys_link , 2)
2057 MIPS_SYS(sys_unlink , 1) /* 4010 */
2058 MIPS_SYS(sys_execve , 0)
2059 MIPS_SYS(sys_chdir , 1)
2060 MIPS_SYS(sys_time , 1)
2061 MIPS_SYS(sys_mknod , 3)
2062 MIPS_SYS(sys_chmod , 2) /* 4015 */
2063 MIPS_SYS(sys_lchown , 3)
2064 MIPS_SYS(sys_ni_syscall , 0)
2065 MIPS_SYS(sys_ni_syscall , 0) /* was sys_stat */
2066 MIPS_SYS(sys_lseek , 3)
2067 MIPS_SYS(sys_getpid , 0) /* 4020 */
2068 MIPS_SYS(sys_mount , 5)
2069 MIPS_SYS(sys_umount , 1)
2070 MIPS_SYS(sys_setuid , 1)
2071 MIPS_SYS(sys_getuid , 0)
2072 MIPS_SYS(sys_stime , 1) /* 4025 */
2073 MIPS_SYS(sys_ptrace , 4)
2074 MIPS_SYS(sys_alarm , 1)
2075 MIPS_SYS(sys_ni_syscall , 0) /* was sys_fstat */
2076 MIPS_SYS(sys_pause , 0)
2077 MIPS_SYS(sys_utime , 2) /* 4030 */
2078 MIPS_SYS(sys_ni_syscall , 0)
2079 MIPS_SYS(sys_ni_syscall , 0)
2080 MIPS_SYS(sys_access , 2)
2081 MIPS_SYS(sys_nice , 1)
2082 MIPS_SYS(sys_ni_syscall , 0) /* 4035 */
2083 MIPS_SYS(sys_sync , 0)
2084 MIPS_SYS(sys_kill , 2)
2085 MIPS_SYS(sys_rename , 2)
2086 MIPS_SYS(sys_mkdir , 2)
2087 MIPS_SYS(sys_rmdir , 1) /* 4040 */
2088 MIPS_SYS(sys_dup , 1)
2089 MIPS_SYS(sys_pipe , 0)
2090 MIPS_SYS(sys_times , 1)
2091 MIPS_SYS(sys_ni_syscall , 0)
2092 MIPS_SYS(sys_brk , 1) /* 4045 */
2093 MIPS_SYS(sys_setgid , 1)
2094 MIPS_SYS(sys_getgid , 0)
2095 MIPS_SYS(sys_ni_syscall , 0) /* was signal(2) */
2096 MIPS_SYS(sys_geteuid , 0)
2097 MIPS_SYS(sys_getegid , 0) /* 4050 */
2098 MIPS_SYS(sys_acct , 0)
2099 MIPS_SYS(sys_umount2 , 2)
2100 MIPS_SYS(sys_ni_syscall , 0)
2101 MIPS_SYS(sys_ioctl , 3)
2102 MIPS_SYS(sys_fcntl , 3) /* 4055 */
2103 MIPS_SYS(sys_ni_syscall , 2)
2104 MIPS_SYS(sys_setpgid , 2)
2105 MIPS_SYS(sys_ni_syscall , 0)
2106 MIPS_SYS(sys_olduname , 1)
2107 MIPS_SYS(sys_umask , 1) /* 4060 */
2108 MIPS_SYS(sys_chroot , 1)
2109 MIPS_SYS(sys_ustat , 2)
2110 MIPS_SYS(sys_dup2 , 2)
2111 MIPS_SYS(sys_getppid , 0)
2112 MIPS_SYS(sys_getpgrp , 0) /* 4065 */
2113 MIPS_SYS(sys_setsid , 0)
2114 MIPS_SYS(sys_sigaction , 3)
2115 MIPS_SYS(sys_sgetmask , 0)
2116 MIPS_SYS(sys_ssetmask , 1)
2117 MIPS_SYS(sys_setreuid , 2) /* 4070 */
2118 MIPS_SYS(sys_setregid , 2)
2119 MIPS_SYS(sys_sigsuspend , 0)
2120 MIPS_SYS(sys_sigpending , 1)
2121 MIPS_SYS(sys_sethostname , 2)
2122 MIPS_SYS(sys_setrlimit , 2) /* 4075 */
2123 MIPS_SYS(sys_getrlimit , 2)
2124 MIPS_SYS(sys_getrusage , 2)
2125 MIPS_SYS(sys_gettimeofday, 2)
2126 MIPS_SYS(sys_settimeofday, 2)
2127 MIPS_SYS(sys_getgroups , 2) /* 4080 */
2128 MIPS_SYS(sys_setgroups , 2)
2129 MIPS_SYS(sys_ni_syscall , 0) /* old_select */
2130 MIPS_SYS(sys_symlink , 2)
2131 MIPS_SYS(sys_ni_syscall , 0) /* was sys_lstat */
2132 MIPS_SYS(sys_readlink , 3) /* 4085 */
2133 MIPS_SYS(sys_uselib , 1)
2134 MIPS_SYS(sys_swapon , 2)
2135 MIPS_SYS(sys_reboot , 3)
2136 MIPS_SYS(old_readdir , 3)
2137 MIPS_SYS(old_mmap , 6) /* 4090 */
2138 MIPS_SYS(sys_munmap , 2)
2139 MIPS_SYS(sys_truncate , 2)
2140 MIPS_SYS(sys_ftruncate , 2)
2141 MIPS_SYS(sys_fchmod , 2)
2142 MIPS_SYS(sys_fchown , 3) /* 4095 */
2143 MIPS_SYS(sys_getpriority , 2)
2144 MIPS_SYS(sys_setpriority , 3)
2145 MIPS_SYS(sys_ni_syscall , 0)
2146 MIPS_SYS(sys_statfs , 2)
2147 MIPS_SYS(sys_fstatfs , 2) /* 4100 */
2148 MIPS_SYS(sys_ni_syscall , 0) /* was ioperm(2) */
2149 MIPS_SYS(sys_socketcall , 2)
2150 MIPS_SYS(sys_syslog , 3)
2151 MIPS_SYS(sys_setitimer , 3)
2152 MIPS_SYS(sys_getitimer , 2) /* 4105 */
2153 MIPS_SYS(sys_newstat , 2)
2154 MIPS_SYS(sys_newlstat , 2)
2155 MIPS_SYS(sys_newfstat , 2)
2156 MIPS_SYS(sys_uname , 1)
2157 MIPS_SYS(sys_ni_syscall , 0) /* 4110 was iopl(2) */
2158 MIPS_SYS(sys_vhangup , 0)
2159 MIPS_SYS(sys_ni_syscall , 0) /* was sys_idle() */
2160 MIPS_SYS(sys_ni_syscall , 0) /* was sys_vm86 */
2161 MIPS_SYS(sys_wait4 , 4)
2162 MIPS_SYS(sys_swapoff , 1) /* 4115 */
2163 MIPS_SYS(sys_sysinfo , 1)
2164 MIPS_SYS(sys_ipc , 6)
2165 MIPS_SYS(sys_fsync , 1)
2166 MIPS_SYS(sys_sigreturn , 0)
2167 MIPS_SYS(sys_clone , 6) /* 4120 */
2168 MIPS_SYS(sys_setdomainname, 2)
2169 MIPS_SYS(sys_newuname , 1)
2170 MIPS_SYS(sys_ni_syscall , 0) /* sys_modify_ldt */
2171 MIPS_SYS(sys_adjtimex , 1)
2172 MIPS_SYS(sys_mprotect , 3) /* 4125 */
2173 MIPS_SYS(sys_sigprocmask , 3)
2174 MIPS_SYS(sys_ni_syscall , 0) /* was create_module */
2175 MIPS_SYS(sys_init_module , 5)
2176 MIPS_SYS(sys_delete_module, 1)
2177 MIPS_SYS(sys_ni_syscall , 0) /* 4130 was get_kernel_syms */
2178 MIPS_SYS(sys_quotactl , 0)
2179 MIPS_SYS(sys_getpgid , 1)
2180 MIPS_SYS(sys_fchdir , 1)
2181 MIPS_SYS(sys_bdflush , 2)
2182 MIPS_SYS(sys_sysfs , 3) /* 4135 */
2183 MIPS_SYS(sys_personality , 1)
2184 MIPS_SYS(sys_ni_syscall , 0) /* for afs_syscall */
2185 MIPS_SYS(sys_setfsuid , 1)
2186 MIPS_SYS(sys_setfsgid , 1)
2187 MIPS_SYS(sys_llseek , 5) /* 4140 */
2188 MIPS_SYS(sys_getdents , 3)
2189 MIPS_SYS(sys_select , 5)
2190 MIPS_SYS(sys_flock , 2)
2191 MIPS_SYS(sys_msync , 3)
2192 MIPS_SYS(sys_readv , 3) /* 4145 */
2193 MIPS_SYS(sys_writev , 3)
2194 MIPS_SYS(sys_cacheflush , 3)
2195 MIPS_SYS(sys_cachectl , 3)
2196 MIPS_SYS(sys_sysmips , 4)
2197 MIPS_SYS(sys_ni_syscall , 0) /* 4150 */
2198 MIPS_SYS(sys_getsid , 1)
2199 MIPS_SYS(sys_fdatasync , 0)
2200 MIPS_SYS(sys_sysctl , 1)
2201 MIPS_SYS(sys_mlock , 2)
2202 MIPS_SYS(sys_munlock , 2) /* 4155 */
2203 MIPS_SYS(sys_mlockall , 1)
2204 MIPS_SYS(sys_munlockall , 0)
2205 MIPS_SYS(sys_sched_setparam, 2)
2206 MIPS_SYS(sys_sched_getparam, 2)
2207 MIPS_SYS(sys_sched_setscheduler, 3) /* 4160 */
2208 MIPS_SYS(sys_sched_getscheduler, 1)
2209 MIPS_SYS(sys_sched_yield , 0)
2210 MIPS_SYS(sys_sched_get_priority_max, 1)
2211 MIPS_SYS(sys_sched_get_priority_min, 1)
2212 MIPS_SYS(sys_sched_rr_get_interval, 2) /* 4165 */
2213 MIPS_SYS(sys_nanosleep, 2)
2214 MIPS_SYS(sys_mremap , 5)
2215 MIPS_SYS(sys_accept , 3)
2216 MIPS_SYS(sys_bind , 3)
2217 MIPS_SYS(sys_connect , 3) /* 4170 */
2218 MIPS_SYS(sys_getpeername , 3)
2219 MIPS_SYS(sys_getsockname , 3)
2220 MIPS_SYS(sys_getsockopt , 5)
2221 MIPS_SYS(sys_listen , 2)
2222 MIPS_SYS(sys_recv , 4) /* 4175 */
2223 MIPS_SYS(sys_recvfrom , 6)
2224 MIPS_SYS(sys_recvmsg , 3)
2225 MIPS_SYS(sys_send , 4)
2226 MIPS_SYS(sys_sendmsg , 3)
2227 MIPS_SYS(sys_sendto , 6) /* 4180 */
2228 MIPS_SYS(sys_setsockopt , 5)
2229 MIPS_SYS(sys_shutdown , 2)
2230 MIPS_SYS(sys_socket , 3)
2231 MIPS_SYS(sys_socketpair , 4)
2232 MIPS_SYS(sys_setresuid , 3) /* 4185 */
2233 MIPS_SYS(sys_getresuid , 3)
2234 MIPS_SYS(sys_ni_syscall , 0) /* was sys_query_module */
2235 MIPS_SYS(sys_poll , 3)
2236 MIPS_SYS(sys_nfsservctl , 3)
2237 MIPS_SYS(sys_setresgid , 3) /* 4190 */
2238 MIPS_SYS(sys_getresgid , 3)
2239 MIPS_SYS(sys_prctl , 5)
2240 MIPS_SYS(sys_rt_sigreturn, 0)
2241 MIPS_SYS(sys_rt_sigaction, 4)
2242 MIPS_SYS(sys_rt_sigprocmask, 4) /* 4195 */
2243 MIPS_SYS(sys_rt_sigpending, 2)
2244 MIPS_SYS(sys_rt_sigtimedwait, 4)
2245 MIPS_SYS(sys_rt_sigqueueinfo, 3)
2246 MIPS_SYS(sys_rt_sigsuspend, 0)
2247 MIPS_SYS(sys_pread64 , 6) /* 4200 */
2248 MIPS_SYS(sys_pwrite64 , 6)
2249 MIPS_SYS(sys_chown , 3)
2250 MIPS_SYS(sys_getcwd , 2)
2251 MIPS_SYS(sys_capget , 2)
2252 MIPS_SYS(sys_capset , 2) /* 4205 */
2253 MIPS_SYS(sys_sigaltstack , 2)
2254 MIPS_SYS(sys_sendfile , 4)
2255 MIPS_SYS(sys_ni_syscall , 0)
2256 MIPS_SYS(sys_ni_syscall , 0)
2257 MIPS_SYS(sys_mmap2 , 6) /* 4210 */
2258 MIPS_SYS(sys_truncate64 , 4)
2259 MIPS_SYS(sys_ftruncate64 , 4)
2260 MIPS_SYS(sys_stat64 , 2)
2261 MIPS_SYS(sys_lstat64 , 2)
2262 MIPS_SYS(sys_fstat64 , 2) /* 4215 */
2263 MIPS_SYS(sys_pivot_root , 2)
2264 MIPS_SYS(sys_mincore , 3)
2265 MIPS_SYS(sys_madvise , 3)
2266 MIPS_SYS(sys_getdents64 , 3)
2267 MIPS_SYS(sys_fcntl64 , 3) /* 4220 */
2268 MIPS_SYS(sys_ni_syscall , 0)
2269 MIPS_SYS(sys_gettid , 0)
2270 MIPS_SYS(sys_readahead , 5)
2271 MIPS_SYS(sys_setxattr , 5)
2272 MIPS_SYS(sys_lsetxattr , 5) /* 4225 */
2273 MIPS_SYS(sys_fsetxattr , 5)
2274 MIPS_SYS(sys_getxattr , 4)
2275 MIPS_SYS(sys_lgetxattr , 4)
2276 MIPS_SYS(sys_fgetxattr , 4)
2277 MIPS_SYS(sys_listxattr , 3) /* 4230 */
2278 MIPS_SYS(sys_llistxattr , 3)
2279 MIPS_SYS(sys_flistxattr , 3)
2280 MIPS_SYS(sys_removexattr , 2)
2281 MIPS_SYS(sys_lremovexattr, 2)
2282 MIPS_SYS(sys_fremovexattr, 2) /* 4235 */
2283 MIPS_SYS(sys_tkill , 2)
2284 MIPS_SYS(sys_sendfile64 , 5)
2285 MIPS_SYS(sys_futex , 6)
2286 MIPS_SYS(sys_sched_setaffinity, 3)
2287 MIPS_SYS(sys_sched_getaffinity, 3) /* 4240 */
2288 MIPS_SYS(sys_io_setup , 2)
2289 MIPS_SYS(sys_io_destroy , 1)
2290 MIPS_SYS(sys_io_getevents, 5)
2291 MIPS_SYS(sys_io_submit , 3)
2292 MIPS_SYS(sys_io_cancel , 3) /* 4245 */
2293 MIPS_SYS(sys_exit_group , 1)
2294 MIPS_SYS(sys_lookup_dcookie, 3)
2295 MIPS_SYS(sys_epoll_create, 1)
2296 MIPS_SYS(sys_epoll_ctl , 4)
2297 MIPS_SYS(sys_epoll_wait , 3) /* 4250 */
2298 MIPS_SYS(sys_remap_file_pages, 5)
2299 MIPS_SYS(sys_set_tid_address, 1)
2300 MIPS_SYS(sys_restart_syscall, 0)
2301 MIPS_SYS(sys_fadvise64_64, 7)
2302 MIPS_SYS(sys_statfs64 , 3) /* 4255 */
2303 MIPS_SYS(sys_fstatfs64 , 2)
2304 MIPS_SYS(sys_timer_create, 3)
2305 MIPS_SYS(sys_timer_settime, 4)
2306 MIPS_SYS(sys_timer_gettime, 2)
2307 MIPS_SYS(sys_timer_getoverrun, 1) /* 4260 */
2308 MIPS_SYS(sys_timer_delete, 1)
2309 MIPS_SYS(sys_clock_settime, 2)
2310 MIPS_SYS(sys_clock_gettime, 2)
2311 MIPS_SYS(sys_clock_getres, 2)
2312 MIPS_SYS(sys_clock_nanosleep, 4) /* 4265 */
2313 MIPS_SYS(sys_tgkill , 3)
2314 MIPS_SYS(sys_utimes , 2)
2315 MIPS_SYS(sys_mbind , 4)
2316 MIPS_SYS(sys_ni_syscall , 0) /* sys_get_mempolicy */
2317 MIPS_SYS(sys_ni_syscall , 0) /* 4270 sys_set_mempolicy */
2318 MIPS_SYS(sys_mq_open , 4)
2319 MIPS_SYS(sys_mq_unlink , 1)
2320 MIPS_SYS(sys_mq_timedsend, 5)
2321 MIPS_SYS(sys_mq_timedreceive, 5)
2322 MIPS_SYS(sys_mq_notify , 2) /* 4275 */
2323 MIPS_SYS(sys_mq_getsetattr, 3)
2324 MIPS_SYS(sys_ni_syscall , 0) /* sys_vserver */
2325 MIPS_SYS(sys_waitid , 4)
2326 MIPS_SYS(sys_ni_syscall , 0) /* available, was setaltroot */
2327 MIPS_SYS(sys_add_key , 5)
2328 MIPS_SYS(sys_request_key, 4)
2329 MIPS_SYS(sys_keyctl , 5)
2330 MIPS_SYS(sys_set_thread_area, 1)
2331 MIPS_SYS(sys_inotify_init, 0)
2332 MIPS_SYS(sys_inotify_add_watch, 3) /* 4285 */
2333 MIPS_SYS(sys_inotify_rm_watch, 2)
2334 MIPS_SYS(sys_migrate_pages, 4)
2335 MIPS_SYS(sys_openat, 4)
2336 MIPS_SYS(sys_mkdirat, 3)
2337 MIPS_SYS(sys_mknodat, 4) /* 4290 */
2338 MIPS_SYS(sys_fchownat, 5)
2339 MIPS_SYS(sys_futimesat, 3)
2340 MIPS_SYS(sys_fstatat64, 4)
2341 MIPS_SYS(sys_unlinkat, 3)
2342 MIPS_SYS(sys_renameat, 4) /* 4295 */
2343 MIPS_SYS(sys_linkat, 5)
2344 MIPS_SYS(sys_symlinkat, 3)
2345 MIPS_SYS(sys_readlinkat, 4)
2346 MIPS_SYS(sys_fchmodat, 3)
2347 MIPS_SYS(sys_faccessat, 3) /* 4300 */
2348 MIPS_SYS(sys_pselect6, 6)
2349 MIPS_SYS(sys_ppoll, 5)
2350 MIPS_SYS(sys_unshare, 1)
2351 MIPS_SYS(sys_splice, 6)
2352 MIPS_SYS(sys_sync_file_range, 7) /* 4305 */
2353 MIPS_SYS(sys_tee, 4)
2354 MIPS_SYS(sys_vmsplice, 4)
2355 MIPS_SYS(sys_move_pages, 6)
2356 MIPS_SYS(sys_set_robust_list, 2)
2357 MIPS_SYS(sys_get_robust_list, 3) /* 4310 */
2358 MIPS_SYS(sys_kexec_load, 4)
2359 MIPS_SYS(sys_getcpu, 3)
2360 MIPS_SYS(sys_epoll_pwait, 6)
2361 MIPS_SYS(sys_ioprio_set, 3)
2362 MIPS_SYS(sys_ioprio_get, 2)
2363 MIPS_SYS(sys_utimensat, 4)
2364 MIPS_SYS(sys_signalfd, 3)
2365 MIPS_SYS(sys_ni_syscall, 0) /* was timerfd */
2366 MIPS_SYS(sys_eventfd, 1)
2367 MIPS_SYS(sys_fallocate, 6) /* 4320 */
2368 MIPS_SYS(sys_timerfd_create, 2)
2369 MIPS_SYS(sys_timerfd_gettime, 2)
2370 MIPS_SYS(sys_timerfd_settime, 4)
2371 MIPS_SYS(sys_signalfd4, 4)
2372 MIPS_SYS(sys_eventfd2, 2) /* 4325 */
2373 MIPS_SYS(sys_epoll_create1, 1)
2374 MIPS_SYS(sys_dup3, 3)
2375 MIPS_SYS(sys_pipe2, 2)
2376 MIPS_SYS(sys_inotify_init1, 1)
2377 MIPS_SYS(sys_preadv, 6) /* 4330 */
2378 MIPS_SYS(sys_pwritev, 6)
2379 MIPS_SYS(sys_rt_tgsigqueueinfo, 4)
2380 MIPS_SYS(sys_perf_event_open, 5)
2381 MIPS_SYS(sys_accept4, 4)
2382 MIPS_SYS(sys_recvmmsg, 5) /* 4335 */
2383 MIPS_SYS(sys_fanotify_init, 2)
2384 MIPS_SYS(sys_fanotify_mark, 6)
2385 MIPS_SYS(sys_prlimit64, 4)
2386 MIPS_SYS(sys_name_to_handle_at, 5)
2387 MIPS_SYS(sys_open_by_handle_at, 3) /* 4340 */
2388 MIPS_SYS(sys_clock_adjtime, 2)
2389 MIPS_SYS(sys_syncfs, 1)
/*
 * Emulate a MIPS SC/SCD (store-conditional) out of line.
 * Compares the current memory contents at the LL address with the value
 * captured by the matching LL (env->llval); on match, stores llnewval and
 * writes 1 into the destination register, otherwise writes 0. Returns
 * non-zero (presumably the segv flag -- return paths are elided here) when
 * the access faults so the caller can raise SIGSEGV.
 * NOTE(review): elided extraction -- declarations, braces and returns are
 * missing between the visible lines; code lines unchanged.
 */
2394 static int do_store_exclusive(CPUMIPSState *env)
2397 target_ulong page_addr;
2405 page_addr = addr & TARGET_PAGE_MASK;
2408 flags = page_get_flags(page_addr);
2409 if ((flags & PAGE_READ) == 0) {
/* llreg encodes the destination register (low 5 bits) and the
 * doubleword flag (bit 5, i.e. SCD vs SC). */
2412 reg = env->llreg & 0x1f;
2413 d = (env->llreg & 0x20) != 0;
2415 segv = get_user_s64(val, addr);
2417 segv = get_user_s32(val, addr);
/* Memory changed since the LL: the conditional store fails (rt := 0). */
2420 if (val != env->llval) {
2421 env->active_tc.gpr[reg] = 0;
2424 segv = put_user_u64(env->llnewval, addr);
2426 segv = put_user_u32(env->llnewval, addr);
2429 env->active_tc.gpr[reg] = 1;
/* Step past the SC instruction on success. */
2436 env->active_tc.PC += 4;
/*
 * Deliver the signal corresponding to a MIPS BREAK code: arithmetic codes
 * (overflow / divide-by-zero) become SIGFPE, the visible fallback raises
 * SIGTRAP. Other branches are elided from this extraction.
 * NOTE(review): si_code here uses host FPE_INTOVF/FPE_INTDIV rather than
 * the TARGET_FPE_* constants used elsewhere in this file -- worth
 * confirming that the values coincide for the guest ABI.
 * NOTE(review): "&*info" is just "info"; the dereference+address-of is
 * redundant.
 */
2449 static int do_break(CPUMIPSState *env, target_siginfo_t *info,
2457 info->si_signo = TARGET_SIGFPE;
2459 info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
2460 queue_signal(env, info->si_signo, &*info);
2464 info->si_signo = TARGET_SIGTRAP;
2466 queue_signal(env, info->si_signo, &*info);
/*
 * MIPS user-mode CPU loop: run translated code, then convert each CPU
 * exception into a syscall dispatch, a queued target signal, or an abort.
 * NOTE(review): elided extraction -- case labels, break statements and
 * closing braces are missing between visible lines; code unchanged.
 */
2474 void cpu_loop(CPUMIPSState *env)
2476 CPUState *cs = CPU(mips_env_get_cpu(env));
2477 target_siginfo_t info;
2480 # ifdef TARGET_ABI_MIPSO32
2481 unsigned int syscall_num;
2486 trapnr = cpu_mips_exec(cs);
/* Syscall path: step past the SYSCALL instruction first. */
2490 env->active_tc.PC += 4;
2491 # ifdef TARGET_ABI_MIPSO32
/* O32: syscall numbers are rebased at 4000; index into the arg table. */
2492 syscall_num = env->active_tc.gpr[2] - 4000;
2493 if (syscall_num >= sizeof(mips_syscall_args)) {
2494 ret = -TARGET_ENOSYS;
2498 abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
2500 nb_args = mips_syscall_args[syscall_num];
2501 sp_reg = env->active_tc.gpr[29];
2503 /* these arguments are taken from the stack */
/* Args 5..8 live at sp+16..sp+28 per the O32 calling convention;
 * fall-through from the highest arg count downwards (cases elided). */
2505 if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
2509 if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
2513 if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
2517 if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
2523 ret = do_syscall(env, env->active_tc.gpr[2],
2524 env->active_tc.gpr[4],
2525 env->active_tc.gpr[5],
2526 env->active_tc.gpr[6],
2527 env->active_tc.gpr[7],
2528 arg5, arg6, arg7, arg8);
/* Non-O32 ABIs pass all eight arguments in registers a0-a7. */
2532 ret = do_syscall(env, env->active_tc.gpr[2],
2533 env->active_tc.gpr[4], env->active_tc.gpr[5],
2534 env->active_tc.gpr[6], env->active_tc.gpr[7],
2535 env->active_tc.gpr[8], env->active_tc.gpr[9],
2536 env->active_tc.gpr[10], env->active_tc.gpr[11]);
/* Restart: back up to re-execute the SYSCALL instruction. */
2538 if (ret == -TARGET_ERESTARTSYS) {
2539 env->active_tc.PC -= 4;
2542 if (ret == -TARGET_QEMU_ESIGRETURN) {
2543 /* Returning from a successful sigreturn syscall.
2544 Avoid clobbering register state. */
/* MIPS ABI: a3 (gpr[7]) is the error flag; -1133 bounds the errno
 * window -- presumably matching the kernel's MAX_ERRNO; confirm. */
2547 if ((abi_ulong)ret >= (abi_ulong)-1133) {
2548 env->active_tc.gpr[7] = 1; /* error flag */
2551 env->active_tc.gpr[7] = 0; /* error flag */
2553 env->active_tc.gpr[2] = ret;
2559 info.si_signo = TARGET_SIGSEGV;
2561 /* XXX: check env->error_code */
2562 info.si_code = TARGET_SEGV_MAPERR;
2563 info._sifields._sigfault._addr = env->CP0_BadVAddr;
2564 queue_signal(env, info.si_signo, &info);
2568 info.si_signo = TARGET_SIGILL;
2571 queue_signal(env, info.si_signo, &info);
2573 case EXCP_INTERRUPT:
2574 /* just indicate that signals should be handled asap */
2580 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2583 info.si_signo = sig;
2585 info.si_code = TARGET_TRAP_BRKPT;
2586 queue_signal(env, info.si_signo, &info);
2591 if (do_store_exclusive(env)) {
2592 info.si_signo = TARGET_SIGSEGV;
2594 info.si_code = TARGET_SEGV_MAPERR;
2595 info._sifields._sigfault._addr = env->active_tc.PC;
2596 queue_signal(env, info.si_signo, &info);
2600 info.si_signo = TARGET_SIGILL;
2602 info.si_code = TARGET_ILL_ILLOPC;
2603 queue_signal(env, info.si_signo, &info);
2605 /* The code below was inspired by the MIPS Linux kernel trap
2606 * handling code in arch/mips/kernel/traps.c.
2610 abi_ulong trap_instr;
/* BREAK handling: decode the break code from the instruction bytes,
 * with separate paths for microMIPS/MIPS16 and classic encodings. */
2613 if (env->hflags & MIPS_HFLAG_M16) {
2614 if (env->insn_flags & ASE_MICROMIPS) {
2615 /* microMIPS mode */
2616 ret = get_user_u16(trap_instr, env->active_tc.PC);
2621 if ((trap_instr >> 10) == 0x11) {
2622 /* 16-bit instruction */
2623 code = trap_instr & 0xf;
2625 /* 32-bit instruction */
2628 ret = get_user_u16(instr_lo,
2629 env->active_tc.PC + 2);
2633 trap_instr = (trap_instr << 16) | instr_lo;
2634 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2635 /* Unfortunately, microMIPS also suffers from
2636 the old assembler bug... */
2637 if (code >= (1 << 10)) {
2643 ret = get_user_u16(trap_instr, env->active_tc.PC);
2647 code = (trap_instr >> 6) & 0x3f;
2650 ret = get_user_u32(trap_instr, env->active_tc.PC);
2655 /* As described in the original Linux kernel code, the
2656 * below checks on 'code' are to work around an old
2659 code = ((trap_instr >> 6) & ((1 << 20) - 1));
2660 if (code >= (1 << 10)) {
2665 if (do_break(env, &info, code) != 0) {
/* TRAP handling: similar decode, but immediate forms carry no code. */
2672 abi_ulong trap_instr;
2673 unsigned int code = 0;
2675 if (env->hflags & MIPS_HFLAG_M16) {
2676 /* microMIPS mode */
2679 ret = get_user_u16(instr[0], env->active_tc.PC) ||
2680 get_user_u16(instr[1], env->active_tc.PC + 2);
2682 trap_instr = (instr[0] << 16) | instr[1];
2684 ret = get_user_u32(trap_instr, env->active_tc.PC);
2691 /* The immediate versions don't provide a code. */
2692 if (!(trap_instr & 0xFC000000)) {
2693 if (env->hflags & MIPS_HFLAG_M16) {
2694 /* microMIPS mode */
2695 code = ((trap_instr >> 12) & ((1 << 4) - 1));
2697 code = ((trap_instr >> 6) & ((1 << 10) - 1));
2701 if (do_break(env, &info, code) != 0) {
2708 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
2711 process_pending_signals(env);
2716 #ifdef TARGET_OPENRISC
/*
 * OpenRISC user-mode CPU loop. Unlike the other loops in this file, most
 * faults here are logged and then forwarded to gdb via gdb_handlesig with
 * 'gdbsig' rather than queued as target signals.
 * NOTE(review): elided extraction -- case labels / breaks between the
 * visible lines are missing; code unchanged.
 * NOTE(review): the "Illegal instructionpc" and "External interruptpc"
 * log strings are missing a space before "pc"; left as-is (runtime
 * strings).
 */
2718 void cpu_loop(CPUOpenRISCState *env)
2720 CPUState *cs = CPU(openrisc_env_get_cpu(env));
2725 trapnr = cpu_openrisc_exec(cs);
2731 qemu_log_mask(CPU_LOG_INT, "\nReset request, exit, pc is %#x\n", env->pc);
2735 qemu_log_mask(CPU_LOG_INT, "\nBus error, exit, pc is %#x\n", env->pc);
2736 gdbsig = TARGET_SIGBUS;
2740 cpu_dump_state(cs, stderr, fprintf, 0);
2741 gdbsig = TARGET_SIGSEGV;
2744 qemu_log_mask(CPU_LOG_INT, "\nTick time interrupt pc is %#x\n", env->pc);
2747 qemu_log_mask(CPU_LOG_INT, "\nAlignment pc is %#x\n", env->pc);
2748 gdbsig = TARGET_SIGBUS;
2751 qemu_log_mask(CPU_LOG_INT, "\nIllegal instructionpc is %#x\n", env->pc);
2752 gdbsig = TARGET_SIGILL;
2755 qemu_log_mask(CPU_LOG_INT, "\nExternal interruptpc is %#x\n", env->pc);
2759 qemu_log_mask(CPU_LOG_INT, "\nTLB miss\n");
2762 qemu_log_mask(CPU_LOG_INT, "\nRange\n");
2763 gdbsig = TARGET_SIGSEGV;
/* Syscall: advance past the l.sys instruction, result goes to r11. */
2766 env->pc += 4; /* 0xc00; */
2767 env->gpr[11] = do_syscall(env,
2768 env->gpr[11], /* return value */
2769 env->gpr[3], /* r3 - r7 are params */
2777 qemu_log_mask(CPU_LOG_INT, "\nFloating point error\n");
2780 qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
2781 gdbsig = TARGET_SIGTRAP;
2784 qemu_log_mask(CPU_LOG_INT, "\nNR\n");
2787 EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
2789 gdbsig = TARGET_SIGILL;
2793 gdb_handlesig(cs, gdbsig);
/* Anything but a breakpoint terminates the guest (exit path elided). */
2794 if (gdbsig != TARGET_SIGTRAP) {
2799 process_pending_signals(env);
2803 #endif /* TARGET_OPENRISC */
/*
 * SH4 user-mode CPU loop: dispatch syscalls (result into gregs[0]),
 * queue SIGTRAP for debug traps and SIGSEGV for data faults (fault
 * address from env->tea), and dump state on anything unhandled.
 * NOTE(review): elided extraction; case labels/breaks between the
 * visible lines are missing; code unchanged.
 */
2806 void cpu_loop(CPUSH4State *env)
2808 CPUState *cs = CPU(sh_env_get_cpu(env));
2810 target_siginfo_t info;
2814 trapnr = cpu_sh4_exec(cs);
2820 ret = do_syscall(env,
/* Restart is handled by re-execution; sigreturn must not clobber r0. */
2829 if (ret == -TARGET_ERESTARTSYS) {
2831 } else if (ret != -TARGET_QEMU_ESIGRETURN) {
2832 env->gregs[0] = ret;
2835 case EXCP_INTERRUPT:
2836 /* just indicate that signals should be handled asap */
2842 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2845 info.si_signo = sig;
2847 info.si_code = TARGET_TRAP_BRKPT;
2848 queue_signal(env, info.si_signo, &info);
2854 info.si_signo = TARGET_SIGSEGV;
2856 info.si_code = TARGET_SEGV_MAPERR;
2857 info._sifields._sigfault._addr = env->tea;
2858 queue_signal(env, info.si_signo, &info);
2862 printf ("Unhandled trap: 0x%x\n", trapnr);
2863 cpu_dump_state(cs, stderr, fprintf, 0);
2866 process_pending_signals (env);
/*
 * CRIS user-mode CPU loop: SIGSEGV on memory faults (fault address from
 * PR_EDA), syscall dispatch with the result in r10, SIGTRAP for debug.
 * NOTE(review): elided extraction; case labels/breaks between the
 * visible lines are missing; code unchanged.
 */
2872 void cpu_loop(CPUCRISState *env)
2874 CPUState *cs = CPU(cris_env_get_cpu(env));
2876 target_siginfo_t info;
2880 trapnr = cpu_cris_exec(cs);
2885 info.si_signo = TARGET_SIGSEGV;
2887 /* XXX: check env->error_code */
2888 info.si_code = TARGET_SEGV_MAPERR;
2889 info._sifields._sigfault._addr = env->pregs[PR_EDA];
2890 queue_signal(env, info.si_signo, &info);
2893 case EXCP_INTERRUPT:
2894 /* just indicate that signals should be handled asap */
2897 ret = do_syscall(env,
2906 env->regs[10] = ret;
2912 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
2915 info.si_signo = sig;
2917 info.si_code = TARGET_TRAP_BRKPT;
2918 queue_signal(env, info.si_signo, &info);
2923 printf ("Unhandled trap: 0x%x\n", trapnr);
2924 cpu_dump_state(cs, stderr, fprintf, 0);
2927 process_pending_signals (env);
2932 #ifdef TARGET_MICROBLAZE
/*
 * MicroBlaze user-mode CPU loop: syscall dispatch, hardware-exception
 * decoding via SR_ESR (divide-by-zero and FPU errors become SIGFPE),
 * SIGTRAP for debug, SIGSEGV for faults.
 * NOTE(review): elided extraction; case labels/breaks between the
 * visible lines are missing; code unchanged.
 */
2933 void cpu_loop(CPUMBState *env)
2935 CPUState *cs = CPU(mb_env_get_cpu(env));
2937 target_siginfo_t info;
2941 trapnr = cpu_mb_exec(cs);
2946 info.si_signo = TARGET_SIGSEGV;
2948 /* XXX: check env->error_code */
2949 info.si_code = TARGET_SEGV_MAPERR;
2950 info._sifields._sigfault._addr = 0;
2951 queue_signal(env, info.si_signo, &info);
2954 case EXCP_INTERRUPT:
2955 /* just indicate that signals should be handled asap */
2958 /* Return address is 4 bytes after the call. */
2960 env->sregs[SR_PC] = env->regs[14];
2961 ret = do_syscall(env,
/* Hardware exception: r17 gets the return address; if the fault hit
 * a delay slot (D_FLAG), mark ESR and back PC up to the branch. */
2973 env->regs[17] = env->sregs[SR_PC] + 4;
2974 if (env->iflags & D_FLAG) {
2975 env->sregs[SR_ESR] |= 1 << 12;
2976 env->sregs[SR_PC] -= 4;
2977 /* FIXME: if branch was immed, replay the imm as well. */
2980 env->iflags &= ~(IMM_FLAG | D_FLAG);
/* Low 5 bits of ESR hold the exception cause code. */
2982 switch (env->sregs[SR_ESR] & 31) {
2983 case ESR_EC_DIVZERO:
2984 info.si_signo = TARGET_SIGFPE;
2986 info.si_code = TARGET_FPE_FLTDIV;
2987 info._sifields._sigfault._addr = 0;
2988 queue_signal(env, info.si_signo, &info);
2991 info.si_signo = TARGET_SIGFPE;
2993 if (env->sregs[SR_FSR] & FSR_IO) {
2994 info.si_code = TARGET_FPE_FLTINV;
2996 if (env->sregs[SR_FSR] & FSR_DZ) {
2997 info.si_code = TARGET_FPE_FLTDIV;
2999 info._sifields._sigfault._addr = 0;
3000 queue_signal(env, info.si_signo, &info);
3003 printf ("Unhandled hw-exception: 0x%x\n",
3004 env->sregs[SR_ESR] & ESR_EC_MASK);
3005 cpu_dump_state(cs, stderr, fprintf, 0);
3014 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3017 info.si_signo = sig;
3019 info.si_code = TARGET_TRAP_BRKPT;
3020 queue_signal(env, info.si_signo, &info);
3025 printf ("Unhandled trap: 0x%x\n", trapnr);
3026 cpu_dump_state(cs, stderr, fprintf, 0);
3029 process_pending_signals (env);
/*
 * M68K user-mode CPU loop: supports three syscall flavours -- simulator
 * calls (ts->sim_syscalls, number read from memory after the trap),
 * semihosting (EXCP_HALT_INSN), and regular Linux syscalls (result in
 * d0) -- plus the usual signal conversions.
 * NOTE(review): elided extraction; case labels/breaks between the
 * visible lines are missing; code unchanged.
 */
3036 void cpu_loop(CPUM68KState *env)
3038 CPUState *cs = CPU(m68k_env_get_cpu(env));
3041 target_siginfo_t info;
3042 TaskState *ts = cs->opaque;
3046 trapnr = cpu_m68k_exec(cs);
3051 if (ts->sim_syscalls) {
/* Simulator call number is encoded in the 16 bits after the insn. */
3053 get_user_u16(nr, env->pc + 2);
3055 do_m68k_simcall(env, nr);
3061 case EXCP_HALT_INSN:
3062 /* Semihosting syscall. */
3064 do_m68k_semihosting(env, env->dregs[0]);
3068 case EXCP_UNSUPPORTED:
3070 info.si_signo = TARGET_SIGILL;
3072 info.si_code = TARGET_ILL_ILLOPN;
3073 info._sifields._sigfault._addr = env->pc;
3074 queue_signal(env, info.si_signo, &info);
3078 ts->sim_syscalls = 0;
3081 env->dregs[0] = do_syscall(env,
3092 case EXCP_INTERRUPT:
3093 /* just indicate that signals should be handled asap */
3097 info.si_signo = TARGET_SIGSEGV;
3099 /* XXX: check env->error_code */
3100 info.si_code = TARGET_SEGV_MAPERR;
3101 info._sifields._sigfault._addr = env->mmu.ar;
3102 queue_signal(env, info.si_signo, &info);
3109 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3112 info.si_signo = sig;
3114 info.si_code = TARGET_TRAP_BRKPT;
3115 queue_signal(env, info.si_signo, &info);
3120 EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
3123 process_pending_signals(env);
3126 #endif /* TARGET_M68K */
/*
 * Alpha STL_C/STQ_C emulation: complete a store-conditional that was
 * paired with an earlier load-locked.  The store succeeds only if the
 * memory word still equals env->lock_value; a failed guest access
 * raises SIGSEGV/SEGV_MAPERR at the conflicting address.
 * quad != 0 selects 64-bit (STQ_C); otherwise 32-bit (STL_C).
 * NOTE(review): listing is elided; success/failure bookkeeping between
 * the visible lines is not shown here.
 */
3129 static void do_store_exclusive(CPUAlphaState *env, int reg, int quad)
3131 target_ulong addr, val, tmp;
3132 target_siginfo_t info;
/* Consume and clear the lock state up front — no retry after a fault. */
3135 addr = env->lock_addr;
3136 tmp = env->lock_st_addr;
3137 env->lock_addr = -1;
3138 env->lock_st_addr = 0;
3144 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3148 if (val == env->lock_value) {
3150 if (quad ? put_user_u64(tmp, addr) : put_user_u32(tmp, addr)) {
/* A guest access failed above: report SEGV at the locked address. */
3167 info.si_signo = TARGET_SIGSEGV;
3169 info.si_code = TARGET_SEGV_MAPERR;
3170 info._sifields._sigfault._addr = addr;
3171 queue_signal(env, TARGET_SIGSEGV, &info);
/*
 * Alpha user-mode CPU loop.  Dispatches on the trap number returned by
 * cpu_alpha_exec(): faults become queued guest signals, CALL_PAL
 * callsys enters do_syscall(), gentrap raises SIGFPE/SIGTRAP, and
 * STL_C/STQ_C fall through to do_store_exclusive().
 * NOTE(review): elided listing — case labels, braces and breaks between
 * the visible lines are missing from this capture.
 */
3174 void cpu_loop(CPUAlphaState *env)
3176 CPUState *cs = CPU(alpha_env_get_cpu(env));
3178 target_siginfo_t info;
3183 trapnr = cpu_alpha_exec(cs);
3186 /* All of the traps imply a transition through PALcode, which
3187 implies an REI instruction has been executed. Which means
3188 that the intr_flag should be cleared. */
3193 fprintf(stderr, "Reset requested. Exit\n");
3197 fprintf(stderr, "Machine check exception. Exit\n");
3200 case EXCP_SMP_INTERRUPT:
3201 case EXCP_CLK_INTERRUPT:
3202 case EXCP_DEV_INTERRUPT:
3203 fprintf(stderr, "External interrupt. Exit\n");
/* MMU fault: ACCERR if the page is mapped but protected, else MAPERR.
   Every fault path also invalidates any pending load-locked address. */
3207 env->lock_addr = -1;
3208 info.si_signo = TARGET_SIGSEGV;
3210 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
3211 ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
3212 info._sifields._sigfault._addr = env->trap_arg0;
3213 queue_signal(env, info.si_signo, &info);
/* Unaligned access: SIGBUS with the offending address. */
3216 env->lock_addr = -1;
3217 info.si_signo = TARGET_SIGBUS;
3219 info.si_code = TARGET_BUS_ADRALN;
3220 info._sifields._sigfault._addr = env->trap_arg0;
3221 queue_signal(env, info.si_signo, &info);
/* Illegal opcode: SIGILL at the current PC. */
3225 env->lock_addr = -1;
3226 info.si_signo = TARGET_SIGILL;
3228 info.si_code = TARGET_ILL_ILLOPC;
3229 info._sifields._sigfault._addr = env->pc;
3230 queue_signal(env, info.si_signo, &info);
/* Arithmetic trap: SIGFPE (invalid operation) at the current PC. */
3233 env->lock_addr = -1;
3234 info.si_signo = TARGET_SIGFPE;
3236 info.si_code = TARGET_FPE_FLTINV;
3237 info._sifields._sigfault._addr = env->pc;
3238 queue_signal(env, info.si_signo, &info);
3241 /* No-op. Linux simply re-enables the FPU. */
/* CALL_PAL: sub-dispatch on the PALcode function in error_code. */
3244 env->lock_addr = -1;
3245 switch (env->error_code) {
3248 info.si_signo = TARGET_SIGTRAP;
3250 info.si_code = TARGET_TRAP_BRKPT;
3251 info._sifields._sigfault._addr = env->pc;
3252 queue_signal(env, info.si_signo, &info);
3256 info.si_signo = TARGET_SIGTRAP;
3259 info._sifields._sigfault._addr = env->pc;
3260 queue_signal(env, info.si_signo, &info);
/* callsys: syscall number in v0, args in a0-a5. */
3264 trapnr = env->ir[IR_V0];
3265 sysret = do_syscall(env, trapnr,
3266 env->ir[IR_A0], env->ir[IR_A1],
3267 env->ir[IR_A2], env->ir[IR_A3],
3268 env->ir[IR_A4], env->ir[IR_A5],
3270 if (sysret == -TARGET_ERESTARTSYS) {
3274 if (sysret == -TARGET_QEMU_ESIGRETURN) {
3277 /* Syscall writes 0 to V0 to bypass error check, similar
3278 to how this is handled internal to Linux kernel.
3279 (Ab)use trapnr temporarily as boolean indicating error. */
3280 trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
3281 env->ir[IR_V0] = (trapnr ? -sysret : sysret);
3282 env->ir[IR_A3] = trapnr;
3286 /* ??? We can probably elide the code using page_unprotect
3287 that is checking for self-modifying code. Instead we
3288 could simply call tb_flush here. Until we work out the
3289 changes required to turn off the extra write protection,
3290 this can be a no-op. */
3294 /* Handled in the translator for usermode. */
3298 /* Handled in the translator for usermode. */
/* gentrap: a0 carries the Alpha software-trap code; map it to si_code. */
3302 info.si_signo = TARGET_SIGFPE;
3303 switch (env->ir[IR_A0]) {
3304 case TARGET_GEN_INTOVF:
3305 info.si_code = TARGET_FPE_INTOVF;
3307 case TARGET_GEN_INTDIV:
3308 info.si_code = TARGET_FPE_INTDIV;
3310 case TARGET_GEN_FLTOVF:
3311 info.si_code = TARGET_FPE_FLTOVF;
3313 case TARGET_GEN_FLTUND:
3314 info.si_code = TARGET_FPE_FLTUND;
3316 case TARGET_GEN_FLTINV:
3317 info.si_code = TARGET_FPE_FLTINV;
3319 case TARGET_GEN_FLTINE:
3320 info.si_code = TARGET_FPE_FLTRES;
3322 case TARGET_GEN_ROPRAND:
3326 info.si_signo = TARGET_SIGTRAP;
3331 info._sifields._sigfault._addr = env->pc;
3332 queue_signal(env, info.si_signo, &info);
/* Debug exception: defer to gdbstub; deliver only if gdb says so. */
3339 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
3340 if (info.si_signo) {
3341 env->lock_addr = -1;
3343 info.si_code = TARGET_TRAP_BRKPT;
3344 queue_signal(env, info.si_signo, &info);
/* STL_C/STQ_C: trapnr - EXCP_STL_C selects the 32/64-bit variant. */
3349 do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
3351 case EXCP_INTERRUPT:
3352 /* Just indicate that signals should be handled asap. */
3355 printf ("Unhandled trap: 0x%x\n", trapnr);
3356 cpu_dump_state(cs, stderr, fprintf, 0);
3359 process_pending_signals (env);
3362 #endif /* TARGET_ALPHA */
/*
 * S390X user-mode CPU loop: SVC (supervisor call) becomes do_syscall(),
 * program interrupts are translated into POSIX signals with appropriate
 * si_codes.  NOTE(review): elided listing — case labels and braces
 * between the visible lines are missing from this capture.
 */
3365 void cpu_loop(CPUS390XState *env)
3367 CPUState *cs = CPU(s390_env_get_cpu(env));
3369 target_siginfo_t info;
3374 trapnr = cpu_s390x_exec(cs)
3377 case EXCP_INTERRUPT:
3378 /* Just indicate that signals should be handled asap. */
/* SVC: syscall number comes from the interruption code;
   advance the PSW past the SVC instruction before dispatching. */
3382 n = env->int_svc_code;
3384 /* syscalls > 255 */
3387 env->psw.addr += env->int_svc_ilen;
3388 env->regs[2] = do_syscall(env, n, env->regs[2], env->regs[3],
3389 env->regs[4], env->regs[5],
3390 env->regs[6], env->regs[7], 0, 0);
/* Debug exception: let gdb pick the signal; default to TRAP_BRKPT. */
3394 sig = gdb_handlesig(cs, TARGET_SIGTRAP);
3396 n = TARGET_TRAP_BRKPT;
/* Program interrupt: map the program-interruption code to sig/si_code. */
3401 n = env->int_pgm_code;
3404 case PGM_PRIVILEGED:
3405 sig = TARGET_SIGILL;
3406 n = TARGET_ILL_ILLOPC;
3408 case PGM_PROTECTION:
3409 case PGM_ADDRESSING:
3410 sig = TARGET_SIGSEGV;
3411 /* XXX: check env->error_code */
3412 n = TARGET_SEGV_MAPERR;
3413 addr = env->__excp_addr;
3416 case PGM_SPECIFICATION:
3417 case PGM_SPECIAL_OP:
3420 sig = TARGET_SIGILL;
3421 n = TARGET_ILL_ILLOPN;
3424 case PGM_FIXPT_OVERFLOW:
3425 sig = TARGET_SIGFPE;
3426 n = TARGET_FPE_INTOVF;
3428 case PGM_FIXPT_DIVIDE:
3429 sig = TARGET_SIGFPE;
3430 n = TARGET_FPE_INTDIV;
/* Data exception: decode the DXC byte from the FPC register. */
3434 n = (env->fpc >> 8) & 0xff;
3436 /* compare-and-trap */
3439 /* An IEEE exception, simulated or otherwise. */
3441 n = TARGET_FPE_FLTINV;
3442 } else if (n & 0x40) {
3443 n = TARGET_FPE_FLTDIV;
3444 } else if (n & 0x20) {
3445 n = TARGET_FPE_FLTOVF;
3446 } else if (n & 0x10) {
3447 n = TARGET_FPE_FLTUND;
3448 } else if (n & 0x08) {
3449 n = TARGET_FPE_FLTRES;
3451 /* ??? Quantum exception; BFP, DFP error. */
3454 sig = TARGET_SIGFPE;
3459 fprintf(stderr, "Unhandled program exception: %#x\n", n);
3460 cpu_dump_state(cs, stderr, fprintf, 0);
/* Deliver the signal computed above at the current PSW address. */
3466 addr = env->psw.addr;
3468 info.si_signo = sig;
3471 info._sifields._sigfault._addr = addr;
3472 queue_signal(env, info.si_signo, &info);
3476 fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
3477 cpu_dump_state(cs, stderr, fprintf, 0);
3480 process_pending_signals (env);
3484 #endif /* TARGET_S390X */
3486 #ifdef TARGET_TILEGX
/* Queue SIGILL/ILL_PRVREG at the current PC: the guest touched an
 * inaccessible (IDN/UDN or out-of-range) register. */
3488 static void gen_sigill_reg(CPUTLGState *env)
3490 target_siginfo_t info;
3492 info.si_signo = TARGET_SIGILL;
3494 info.si_code = TARGET_ILL_PRVREG;
3495 info._sifields._sigfault._addr = env->pc;
3496 queue_signal(env, info.si_signo, &info);
/*
 * Queue an arbitrary signal for the TILE-Gx guest, faulting at env->pc.
 * For SIGSEGV the caller's sigcode is ignored: the fault address in
 * env->excaddr is probed and MAPERR (unmapped) or ACCERR (mapped but
 * inaccessible) is chosen instead.
 */
3499 static void do_signal(CPUTLGState *env, int signo, int sigcode)
3501 target_siginfo_t info;
3503 info.si_signo = signo;
3505 info._sifields._sigfault._addr = env->pc;
3507 if (signo == TARGET_SIGSEGV) {
3508 /* The passed in sigcode is a dummy; check for a page mapping
3509 and pass either MAPERR or ACCERR. */
3510 target_ulong addr = env->excaddr;
3511 info._sifields._sigfault._addr = addr;
3512 if (page_check_range(addr, 1, PAGE_VALID) < 0) {
3513 sigcode = TARGET_SEGV_MAPERR;
3515 sigcode = TARGET_SEGV_ACCERR;
3518 info.si_code = sigcode;
3520 queue_signal(env, info.si_signo, &info);
/* Record the faulting address and raise SIGSEGV via do_signal(), which
 * will refine the si_code from the page mapping. */
3523 static void gen_sigsegv_maperr(CPUTLGState *env, target_ulong addr)
3525 env->excaddr = addr;
3526 do_signal(env, TARGET_SIGSEGV, 0);
/*
 * Write val into general register reg, guarding against out-of-range
 * register numbers: those raise SIGILL (gen_sigill_reg) instead of
 * corrupting state.  NOTE(review): lines between the range check and
 * gen_sigill_reg are elided in this listing.
 */
3529 static void set_regval(CPUTLGState *env, uint8_t reg, uint64_t val)
3531 if (unlikely(reg >= TILEGX_R_COUNT)) {
3542 gen_sigill_reg(env);
3545 g_assert_not_reached();
3548 env->regs[reg] = val;
3552 * Compare the 8-byte contents of the CmpValue SPR with the 8-byte value in
3553 * memory at the address held in the first source register. If the values are
3554 * not equal, then no memory operation is performed. If the values are equal,
3555 * the 8-byte quantity from the second source register is written into memory
3556 * at the address held in the first source register. In either case, the result
3557 * of the instruction is the value read from memory. The compare and write to
3558 * memory are atomic and thus can be used for synchronization purposes. This
3559 * instruction only operates for addresses aligned to a 8-byte boundary.
3560 * Unaligned memory access causes an Unaligned Data Reference interrupt.
3562 * Functional Description (64-bit)
3563 * uint64_t memVal = memoryReadDoubleWord (rf[SrcA]);
3564 * rf[Dest] = memVal;
3565 * if (memVal == SPR[CmpValueSPR])
3566 * memoryWriteDoubleWord (rf[SrcA], rf[SrcB]);
3568 * Functional Description (32-bit)
3569 * uint64_t memVal = signExtend32 (memoryReadWord (rf[SrcA]));
3570 * rf[Dest] = memVal;
3571 * if (memVal == signExtend32 (SPR[CmpValueSPR]))
3572 * memoryWriteWord (rf[SrcA], rf[SrcB]);
3575 * This function also processes exch and exch4 which need not process SPR.
/*
 * Emulate TILE-Gx exch/exch4 and cmpexch/cmpexch4 (see the block comment
 * above).  quad selects 64-bit vs 32-bit access; cmp selects the
 * compare-and-exchange flavor that checks the CMPEXCH SPR first.
 * The old memory value always lands in the destination register; a
 * failed guest access delivers SIGSEGV at the operand address.
 */
3577 static void do_exch(CPUTLGState *env, bool quad, bool cmp)
3580 target_long val, sprval;
3584 addr = env->atomic_srca;
3585 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3586 goto sigsegv_maperr;
/* cmpexch compares against the full SPR; cmpexch4 against its
 * sign-extended low 32 bits. */
3591 sprval = env->spregs[TILEGX_SPR_CMPEXCH];
3593 sprval = sextract64(env->spregs[TILEGX_SPR_CMPEXCH], 0, 32);
3597 if (!cmp || val == sprval) {
3598 target_long valb = env->atomic_srcb;
3599 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3600 goto sigsegv_maperr;
3604 set_regval(env, env->atomic_dstr, val);
3610 gen_sigsegv_maperr(env, addr);
/*
 * Emulate the TILE-Gx fetch-and-op family (fetchadd, fetchaddgez,
 * fetchand, fetchor and their 4-byte variants).  Reads the old value,
 * combines it with atomic_srcb according to trapnr, writes the result
 * back, and returns the old value in the destination register.
 * NOTE(review): the actual op lines (add/and/or) are elided from this
 * listing; only the dispatch skeleton is visible.
 */
3613 static void do_fetch(CPUTLGState *env, int trapnr, bool quad)
3617 target_long val, valb;
3621 addr = env->atomic_srca;
3622 valb = env->atomic_srcb;
3623 if (quad ? get_user_s64(val, addr) : get_user_s32(val, addr)) {
3624 goto sigsegv_maperr;
3628 case TILEGX_EXCP_OPCODE_FETCHADD:
3629 case TILEGX_EXCP_OPCODE_FETCHADD4:
3632 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3638 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
/* fetchaddgez4: skip the store when the 32-bit sum went negative. */
3640 if ((int32_t)valb < 0) {
3644 case TILEGX_EXCP_OPCODE_FETCHAND:
3645 case TILEGX_EXCP_OPCODE_FETCHAND4:
3648 case TILEGX_EXCP_OPCODE_FETCHOR:
3649 case TILEGX_EXCP_OPCODE_FETCHOR4:
3653 g_assert_not_reached();
3657 if (quad ? put_user_u64(valb, addr) : put_user_u32(valb, addr)) {
3658 goto sigsegv_maperr;
3662 set_regval(env, env->atomic_dstr, val);
3668 gen_sigsegv_maperr(env, addr);
/*
 * TILE-Gx user-mode CPU loop: syscalls go through do_syscall() with the
 * errno mirrored into TILEGX_R_ERR; atomic opcodes are emulated by
 * do_exch()/do_fetch(); everything else becomes a queued signal.
 */
3671 void cpu_loop(CPUTLGState *env)
3673 CPUState *cs = CPU(tilegx_env_get_cpu(env));
3678 trapnr = cpu_tilegx_exec(cs);
3681 case TILEGX_EXCP_SYSCALL:
3682 env->regs[TILEGX_R_RE] = do_syscall(env, env->regs[TILEGX_R_NR],
3683 env->regs[0], env->regs[1],
3684 env->regs[2], env->regs[3],
3685 env->regs[4], env->regs[5],
3686 env->regs[6], env->regs[7]);
/* R_ERR holds the positive errno when the return value is an error. */
3687 env->regs[TILEGX_R_ERR] = TILEGX_IS_ERRNO(env->regs[TILEGX_R_RE])
3688 ? - env->regs[TILEGX_R_RE]
/* Atomic ops: (quad, cmp) flags select the exact exch variant. */
3691 case TILEGX_EXCP_OPCODE_EXCH:
3692 do_exch(env, true, false);
3694 case TILEGX_EXCP_OPCODE_EXCH4:
3695 do_exch(env, false, false);
3697 case TILEGX_EXCP_OPCODE_CMPEXCH:
3698 do_exch(env, true, true);
3700 case TILEGX_EXCP_OPCODE_CMPEXCH4:
3701 do_exch(env, false, true);
3703 case TILEGX_EXCP_OPCODE_FETCHADD:
3704 case TILEGX_EXCP_OPCODE_FETCHADDGEZ:
3705 case TILEGX_EXCP_OPCODE_FETCHAND:
3706 case TILEGX_EXCP_OPCODE_FETCHOR:
3707 do_fetch(env, trapnr, true);
3709 case TILEGX_EXCP_OPCODE_FETCHADD4:
3710 case TILEGX_EXCP_OPCODE_FETCHADDGEZ4:
3711 case TILEGX_EXCP_OPCODE_FETCHAND4:
3712 case TILEGX_EXCP_OPCODE_FETCHOR4:
3713 do_fetch(env, trapnr, false);
3715 case TILEGX_EXCP_SIGNAL:
3716 do_signal(env, env->signo, env->sigcode);
3718 case TILEGX_EXCP_REG_IDN_ACCESS:
3719 case TILEGX_EXCP_REG_UDN_ACCESS:
3720 gen_sigill_reg(env);
3723 fprintf(stderr, "trapnr is %d[0x%x].\n", trapnr, trapnr);
3724 g_assert_not_reached();
3726 process_pending_signals(env);
/* Per-thread pointer to the CPUState executing on this host thread. */
3732 THREAD CPUState *thread_cpu;
/* Lazily record the host thread id as the task's tid (gettid has no
 * glibc wrapper here, hence the raw syscall). */
3734 void task_settid(TaskState *ts)
3736 if (ts->ts_tid == 0) {
3737 ts->ts_tid = (pid_t)syscall(SYS_gettid);
3741 void stop_all_tasks(void)
3744 * We trust that when using NPTL, start_exclusive()
3745 * handles thread stopping correctly.
3750 /* Assumes contents are already zeroed. */
/* Thread the per-task sigqueue entries into a NULL-terminated free list. */
3751 void init_task_state(TaskState *ts)
3756 ts->first_free = ts->sigqueue_table;
3757 for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
3758 ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
3760 ts->sigqueue_table[i].next = NULL;
/*
 * Clone a CPU for a new guest thread (clone(2)): create a fresh CPU of
 * the same model, copy the architectural register state wholesale, and
 * duplicate all break/watchpoints into the new CPU's own lists (the
 * memcpy would otherwise leave both CPUs sharing queue links).
 */
3763 CPUArchState *cpu_copy(CPUArchState *env)
3765 CPUState *cpu = ENV_GET_CPU(env);
3766 CPUState *new_cpu = cpu_init(cpu_model);
3767 CPUArchState *new_env = new_cpu->env_ptr;
3771 /* Reset non arch specific state */
3774 memcpy(new_env, env, sizeof(CPUArchState));
3776 /* Clone all break/watchpoints.
3777 Note: Once we support ptrace with hw-debug register access, make sure
3778 BP_CPU break/watchpoints are handled correctly on clone. */
3779 QTAILQ_INIT(&new_cpu->breakpoints);
3780 QTAILQ_INIT(&new_cpu->watchpoints);
3781 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
3782 cpu_breakpoint_insert(new_cpu, bp->pc, bp->flags, NULL);
3784 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
3785 cpu_watchpoint_insert(new_cpu, wp->vaddr, wp->len, wp->flags, NULL);
/* -h / -help: print usage and exit successfully. */
3791 static void handle_arg_help(const char *arg)
3793 usage(EXIT_SUCCESS);
/* -d: parse the log-item mask; an unknown item prints log usage. */
3796 static void handle_arg_log(const char *arg)
3800 mask = qemu_str_to_log_mask(arg);
3802 qemu_print_log_usage(stdout);
3805 qemu_log_needs_buffers();
/* -D: redirect qemu logging from stderr to the given file. */
3809 static void handle_arg_log_filename(const char *arg)
3811 qemu_set_log_filename(arg);
/* -E: set one or more VAR=VALUE pairs (comma-separated) in the guest
 * environment; any malformed entry aborts with usage. */
3814 static void handle_arg_set_env(const char *arg)
3816 char *r, *p, *token;
3817 r = p = strdup(arg);
3818 while ((token = strsep(&p, ",")) != NULL) {
3819 if (envlist_setenv(envlist, token) != 0) {
3820 usage(EXIT_FAILURE);
/* -U: remove one or more comma-separated variables from the guest
 * environment; failure aborts with usage. */
3826 static void handle_arg_unset_env(const char *arg)
3828 char *r, *p, *token;
3829 r = p = strdup(arg);
3830 while ((token = strsep(&p, ",")) != NULL) {
3831 if (envlist_unsetenv(envlist, token) != 0) {
3832 usage(EXIT_FAILURE);
/* -0: override the argv[0] seen by the guest program. */
3838 static void handle_arg_argv0(const char *arg)
3840 argv0 = strdup(arg);
/* -s: guest stack size in bytes, with optional M/m or K/k suffix;
 * zero is rejected with usage. */
3843 static void handle_arg_stack_size(const char *arg)
3846 guest_stack_size = strtoul(arg, &p, 0);
3847 if (guest_stack_size == 0) {
3848 usage(EXIT_FAILURE);
3852 guest_stack_size *= 1024 * 1024;
3853 } else if (*p == 'k' || *p == 'K') {
3854 guest_stack_size *= 1024;
/* -L: prefix under which the guest ELF interpreter is looked up. */
3858 static void handle_arg_ld_prefix(const char *arg)
3860 interp_prefix = strdup(arg);
/* -p: override the host page size; must be a nonzero power of two. */
3863 static void handle_arg_pagesize(const char *arg)
3865 qemu_host_page_size = atoi(arg);
3866 if (qemu_host_page_size == 0 ||
3867 (qemu_host_page_size & (qemu_host_page_size - 1)) != 0) {
3868 fprintf(stderr, "page size must be a power of two\n");
/* -seed: PRNG seed for the guest; must parse fully and fit UINT_MAX. */
3873 static void handle_arg_randseed(const char *arg)
3875 unsigned long long seed;
3877 if (parse_uint_full(arg, &seed, 0) != 0 || seed > UINT_MAX) {
3878 fprintf(stderr, "Invalid seed number: %s\n", arg);
/* -g: TCP port on which to wait for a gdb connection. */
3884 static void handle_arg_gdb(const char *arg)
3886 gdbstub_port = atoi(arg);
/* -r: uname release string reported to the guest. */
3889 static void handle_arg_uname(const char *arg)
3891 qemu_uname_release = strdup(arg);
/* -cpu: select the CPU model; "-cpu help" lists models where the
 * target provides cpu_list(). */
3894 static void handle_arg_cpu(const char *arg)
3896 cpu_model = strdup(arg);
3897 if (cpu_model == NULL || is_help_option(cpu_model)) {
3898 /* XXX: implement xxx_cpu_list for targets that still miss it */
3899 #if defined(cpu_list)
3900 cpu_list(stdout, &fprintf);
/* -B: fix the host address at which the guest address space starts. */
3906 static void handle_arg_guest_base(const char *arg)
3908 guest_base = strtol(arg, NULL, 0);
3909 have_guest_base = 1;
/*
 * -R: size of guest virtual address space to reserve, with an optional
 * size suffix (shift computed from elided lines).  Overflow of the
 * shift, or exceeding the target's virtual address space, is fatal.
 */
3912 static void handle_arg_reserved_va(const char *arg)
3916 reserved_va = strtoul(arg, &p, 0);
3930 unsigned long unshifted = reserved_va;
3932 reserved_va <<= shift;
/* Detect wrap-around from the shift as well as an over-large request. */
3933 if (((reserved_va >> shift) != unshifted)
3934 #if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
3935 || (reserved_va > (1ul << TARGET_VIRT_ADDR_SPACE_BITS))
3938 fprintf(stderr, "Reserved virtual address too big\n");
3943 fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
/* -singlestep: one guest instruction per translation block. */
3948 static void handle_arg_singlestep(const char *arg)
/* -strace: log guest system calls. */
3953 static void handle_arg_strace(const char *arg)
/* -version: print version/copyright and exit. */
3958 static void handle_arg_version(const char *arg)
3960 printf("qemu-" TARGET_NAME " version " QEMU_VERSION QEMU_PKGVERSION
3961 ", Copyright (c) 2003-2008 Fabrice Bellard\n");
/* One command-line option: flag name, matching env var, whether it
 * takes an argument, its handler, and help text (fields partly elided
 * from this listing). */
3965 struct qemu_argument {
3969 void (*handle_opt)(const char *arg);
3970 const char *example;
/* Table of all supported options, terminated by an all-NULL sentinel;
 * scanned by parse_args() and formatted by usage(). */
3974 static const struct qemu_argument arg_table[] = {
3975 {"h", "", false, handle_arg_help,
3976 "", "print this help"},
3977 {"help", "", false, handle_arg_help,
3979 {"g", "QEMU_GDB", true, handle_arg_gdb,
3980 "port", "wait gdb connection to 'port'"},
3981 {"L", "QEMU_LD_PREFIX", true, handle_arg_ld_prefix,
3982 "path", "set the elf interpreter prefix to 'path'"},
3983 {"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
3984 "size", "set the stack size to 'size' bytes"},
3985 {"cpu", "QEMU_CPU", true, handle_arg_cpu,
3986 "model", "select CPU (-cpu help for list)"},
3987 {"E", "QEMU_SET_ENV", true, handle_arg_set_env,
3988 "var=value", "sets targets environment variable (see below)"},
3989 {"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
3990 "var", "unsets targets environment variable (see below)"},
3991 {"0", "QEMU_ARGV0", true, handle_arg_argv0,
3992 "argv0", "forces target process argv[0] to be 'argv0'"},
3993 {"r", "QEMU_UNAME", true, handle_arg_uname,
3994 "uname", "set qemu uname release string to 'uname'"},
3995 {"B", "QEMU_GUEST_BASE", true, handle_arg_guest_base,
3996 "address", "set guest_base address to 'address'"},
3997 {"R", "QEMU_RESERVED_VA", true, handle_arg_reserved_va,
3998 "size", "reserve 'size' bytes for guest virtual address space"},
3999 {"d", "QEMU_LOG", true, handle_arg_log,
4000 "item[,...]", "enable logging of specified items "
4001 "(use '-d help' for a list of items)"},
4002 {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename,
4003 "logfile", "write logs to 'logfile' (default stderr)"},
4004 {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize,
4005 "pagesize", "set the host page size to 'pagesize'"},
4006 {"singlestep", "QEMU_SINGLESTEP", false, handle_arg_singlestep,
4007 "", "run in singlestep mode"},
4008 {"strace", "QEMU_STRACE", false, handle_arg_strace,
4009 "", "log system calls"},
4010 {"seed", "QEMU_RAND_SEED", true, handle_arg_randseed,
4011 "", "Seed for pseudo-random number generator"},
4012 {"version", "QEMU_VERSION", false, handle_arg_version,
4013 "", "display version information and exit"},
4014 {NULL, NULL, false, NULL, NULL, NULL}
/*
 * Print the full help text — usage line, an aligned option table built
 * from arg_table, current defaults, and -E/-U env-var notes — then
 * exit with the given code (EXIT_SUCCESS for -h, EXIT_FAILURE on bad
 * arguments).
 */
4017 static void usage(int exitcode)
4019 const struct qemu_argument *arginfo;
4023 printf("usage: qemu-" TARGET_NAME " [options] program [arguments...]\n"
4024 "Linux CPU emulator (compiled for " TARGET_NAME " emulation)\n"
4026 "Options and associated environment variables:\n"
4029 /* Calculate column widths. We must always have at least enough space
4030 * for the column header.
4032 maxarglen = strlen("Argument");
4033 maxenvlen = strlen("Env-variable");
4035 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4036 int arglen = strlen(arginfo->argv);
4037 if (arginfo->has_arg) {
4038 arglen += strlen(arginfo->example) + 1;
4040 if (strlen(arginfo->env) > maxenvlen) {
4041 maxenvlen = strlen(arginfo->env);
4043 if (arglen > maxarglen) {
/* Second pass: emit each row, padding by the widths computed above. */
4048 printf("%-*s %-*s Description\n", maxarglen+1, "Argument",
4049 maxenvlen, "Env-variable");
4051 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4052 if (arginfo->has_arg) {
4053 printf("-%s %-*s %-*s %s\n", arginfo->argv,
4054 (int)(maxarglen - strlen(arginfo->argv) - 1),
4055 arginfo->example, maxenvlen, arginfo->env, arginfo->help);
4057 printf("-%-*s %-*s %s\n", maxarglen, arginfo->argv,
4058 maxenvlen, arginfo->env,
4065 "QEMU_LD_PREFIX = %s\n"
4066 "QEMU_STACK_SIZE = %ld byte\n",
4071 "You can use -E and -U options or the QEMU_SET_ENV and\n"
4072 "QEMU_UNSET_ENV environment variables to set and unset\n"
4073 "environment variables for the target process.\n"
4074 "It is possible to provide several variables by separating them\n"
4075 "by commas in getsubopt(3) style. Additionally it is possible to\n"
4076 "provide the -E and -U options multiple times.\n"
4077 "The following lines are equivalent:\n"
4078 " -E var1=val2 -E var2=val2 -U LD_PRELOAD -U LD_DEBUG\n"
4079 " -E var1=val2,var2=val2 -U LD_PRELOAD,LD_DEBUG\n"
4080 " QEMU_SET_ENV=var1=val2,var2=val2 QEMU_UNSET_ENV=LD_PRELOAD,LD_DEBUG\n"
4081 "Note that if you provide several changes to a single variable\n"
4082 "the last change will stay in effect.\n");
/*
 * Parse QEMU's own command line.  First applies any options supplied
 * via their associated environment variables, then walks argv until
 * the first non-option, which names the guest program.  Returns the
 * updated optind; unknown options and a missing program are fatal.
 */
4087 static int parse_args(int argc, char **argv)
4091 const struct qemu_argument *arginfo;
/* Pass 1: environment variables act as implicit options. */
4093 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4094 if (arginfo->env == NULL) {
4098 r = getenv(arginfo->env);
4100 arginfo->handle_opt(r);
/* Pass 2: scan argv for -opt / --opt forms. */
4106 if (optind >= argc) {
4115 if (!strcmp(r, "-")) {
4118 /* Treat --foo the same as -foo. */
4123 for (arginfo = arg_table; arginfo->handle_opt != NULL; arginfo++) {
4124 if (!strcmp(r, arginfo->argv)) {
4125 if (arginfo->has_arg) {
4126 if (optind >= argc) {
4127 (void) fprintf(stderr,
4128 "qemu: missing argument for option '%s'\n", r);
4131 arginfo->handle_opt(argv[optind]);
4134 arginfo->handle_opt(NULL);
4140 /* no option matched the current argv */
4141 if (arginfo->handle_opt == NULL) {
4142 (void) fprintf(stderr, "qemu: unknown option '%s'\n", r);
4147 if (optind >= argc) {
4148 (void) fprintf(stderr, "qemu: no user program specified\n");
/* First non-option argument is the guest binary. */
4152 filename = argv[optind];
4153 exec_path = argv[optind];
4158 int main(int argc, char **argv, char **envp)
4160 struct target_pt_regs regs1, *regs = ®s1;
4161 struct image_info info1, *info = &info1;
4162 struct linux_binprm bprm;
4167 char **target_environ, **wrk;
4174 module_call_init(MODULE_INIT_QOM);
4176 if ((envlist = envlist_create()) == NULL) {
4177 (void) fprintf(stderr, "Unable to allocate envlist\n");
4181 /* add current environment into the list */
4182 for (wrk = environ; *wrk != NULL; wrk++) {
4183 (void) envlist_setenv(envlist, *wrk);
4186 /* Read the stack limit from the kernel. If it's "unlimited",
4187 then we can do little else besides use the default. */
4190 if (getrlimit(RLIMIT_STACK, &lim) == 0
4191 && lim.rlim_cur != RLIM_INFINITY
4192 && lim.rlim_cur == (target_long)lim.rlim_cur) {
4193 guest_stack_size = lim.rlim_cur;
4201 optind = parse_args(argc, argv);
4204 memset(regs, 0, sizeof(struct target_pt_regs));
4206 /* Zero out image_info */
4207 memset(info, 0, sizeof(struct image_info));
4209 memset(&bprm, 0, sizeof (bprm));
4211 /* Scan interp_prefix dir for replacement files. */
4212 init_paths(interp_prefix);
4214 init_qemu_uname_release();
4216 if (cpu_model == NULL) {
4217 #if defined(TARGET_I386)
4218 #ifdef TARGET_X86_64
4219 cpu_model = "qemu64";
4221 cpu_model = "qemu32";
4223 #elif defined(TARGET_ARM)
4225 #elif defined(TARGET_UNICORE32)
4227 #elif defined(TARGET_M68K)
4229 #elif defined(TARGET_SPARC)
4230 #ifdef TARGET_SPARC64
4231 cpu_model = "TI UltraSparc II";
4233 cpu_model = "Fujitsu MB86904";
4235 #elif defined(TARGET_MIPS)
4236 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
4241 #elif defined TARGET_OPENRISC
4242 cpu_model = "or1200";
4243 #elif defined(TARGET_PPC)
4244 # ifdef TARGET_PPC64
4245 cpu_model = "POWER8";
4249 #elif defined TARGET_SH4
4250 cpu_model = TYPE_SH7785_CPU;
4256 /* NOTE: we need to init the CPU at this stage to get
4257 qemu_host_page_size */
4258 cpu = cpu_init(cpu_model);
4260 fprintf(stderr, "Unable to find CPU definition\n");
4268 if (getenv("QEMU_STRACE")) {
4272 if (getenv("QEMU_RAND_SEED")) {
4273 handle_arg_randseed(getenv("QEMU_RAND_SEED"));
4276 target_environ = envlist_to_environ(envlist, NULL);
4277 envlist_free(envlist);
4280 * Now that page sizes are configured in cpu_init() we can do
4281 * proper page alignment for guest_base.
4283 guest_base = HOST_PAGE_ALIGN(guest_base);
4285 if (reserved_va || have_guest_base) {
4286 guest_base = init_guest_space(guest_base, reserved_va, 0,
4288 if (guest_base == (unsigned long)-1) {
4289 fprintf(stderr, "Unable to reserve 0x%lx bytes of virtual address "
4290 "space for use as guest address space (check your virtual "
4291 "memory ulimit setting or reserve less using -R option)\n",
4297 mmap_next_start = reserved_va;
4302 * Read in mmap_min_addr kernel parameter. This value is used
4303 * When loading the ELF image to determine whether guest_base
4304 * is needed. It is also used in mmap_find_vma.
4309 if ((fp = fopen("/proc/sys/vm/mmap_min_addr", "r")) != NULL) {
4311 if (fscanf(fp, "%lu", &tmp) == 1) {
4312 mmap_min_addr = tmp;
4313 qemu_log_mask(CPU_LOG_PAGE, "host mmap_min_addr=0x%lx\n", mmap_min_addr);
4320 * Prepare copy of argv vector for target.
4322 target_argc = argc - optind;
4323 target_argv = calloc(target_argc + 1, sizeof (char *));
4324 if (target_argv == NULL) {
4325 (void) fprintf(stderr, "Unable to allocate memory for target_argv\n");
4330 * If argv0 is specified (using '-0' switch) we replace
4331 * argv[0] pointer with the given one.
4334 if (argv0 != NULL) {
4335 target_argv[i++] = strdup(argv0);
4337 for (; i < target_argc; i++) {
4338 target_argv[i] = strdup(argv[optind + i]);
4340 target_argv[target_argc] = NULL;
4342 ts = g_new0(TaskState, 1);
4343 init_task_state(ts);
4344 /* build Task State */
4350 execfd = qemu_getauxval(AT_EXECFD);
4352 execfd = open(filename, O_RDONLY);
4354 printf("Error while loading %s: %s\n", filename, strerror(errno));
4355 _exit(EXIT_FAILURE);
4359 ret = loader_exec(execfd, filename, target_argv, target_environ, regs,
4362 printf("Error while loading %s: %s\n", filename, strerror(-ret));
4363 _exit(EXIT_FAILURE);
4366 for (wrk = target_environ; *wrk; wrk++) {
4370 free(target_environ);
4372 if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
4373 qemu_log("guest_base 0x%lx\n", guest_base);
4376 qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
4377 qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
4378 qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
4380 qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
4382 qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
4383 qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
4385 qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
4386 qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
4389 target_set_brk(info->brk);
4393 /* Now that we've loaded the binary, GUEST_BASE is fixed. Delay
4394 generating the prologue until now so that the prologue can take
4395 the real value of GUEST_BASE into account. */
4396 tcg_prologue_init(&tcg_ctx);
4398 #if defined(TARGET_I386)
4399 env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK;
4400 env->hflags |= HF_PE_MASK | HF_CPL_MASK;
4401 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4402 env->cr[4] |= CR4_OSFXSR_MASK;
4403 env->hflags |= HF_OSFXSR_MASK;
4405 #ifndef TARGET_ABI32
4406 /* enable 64 bit mode if possible */
4407 if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
4408 fprintf(stderr, "The selected x86 CPU does not support 64 bit mode\n");
4411 env->cr[4] |= CR4_PAE_MASK;
4412 env->efer |= MSR_EFER_LMA | MSR_EFER_LME;
4413 env->hflags |= HF_LMA_MASK;
4416 /* flags setup : we activate the IRQs by default as in user mode */
4417 env->eflags |= IF_MASK;
4419 /* linux register setup */
4420 #ifndef TARGET_ABI32
4421 env->regs[R_EAX] = regs->rax;
4422 env->regs[R_EBX] = regs->rbx;
4423 env->regs[R_ECX] = regs->rcx;
4424 env->regs[R_EDX] = regs->rdx;
4425 env->regs[R_ESI] = regs->rsi;
4426 env->regs[R_EDI] = regs->rdi;
4427 env->regs[R_EBP] = regs->rbp;
4428 env->regs[R_ESP] = regs->rsp;
4429 env->eip = regs->rip;
4431 env->regs[R_EAX] = regs->eax;
4432 env->regs[R_EBX] = regs->ebx;
4433 env->regs[R_ECX] = regs->ecx;
4434 env->regs[R_EDX] = regs->edx;
4435 env->regs[R_ESI] = regs->esi;
4436 env->regs[R_EDI] = regs->edi;
4437 env->regs[R_EBP] = regs->ebp;
4438 env->regs[R_ESP] = regs->esp;
4439 env->eip = regs->eip;
4442 /* linux interrupt setup */
4443 #ifndef TARGET_ABI32
4444 env->idt.limit = 511;
4446 env->idt.limit = 255;
4448 env->idt.base = target_mmap(0, sizeof(uint64_t) * (env->idt.limit + 1),
4449 PROT_READ|PROT_WRITE,
4450 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4451 idt_table = g2h(env->idt.base);
4474 /* linux segment setup */
4476 uint64_t *gdt_table;
4477 env->gdt.base = target_mmap(0, sizeof(uint64_t) * TARGET_GDT_ENTRIES,
4478 PROT_READ|PROT_WRITE,
4479 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
4480 env->gdt.limit = sizeof(uint64_t) * TARGET_GDT_ENTRIES - 1;
4481 gdt_table = g2h(env->gdt.base);
4483 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4484 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4485 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4487 /* 64 bit code segment */
4488 write_dt(&gdt_table[__USER_CS >> 3], 0, 0xfffff,
4489 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4491 (3 << DESC_DPL_SHIFT) | (0xa << DESC_TYPE_SHIFT));
4493 write_dt(&gdt_table[__USER_DS >> 3], 0, 0xfffff,
4494 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
4495 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
4497 cpu_x86_load_seg(env, R_CS, __USER_CS);
4498 cpu_x86_load_seg(env, R_SS, __USER_DS);
4500 cpu_x86_load_seg(env, R_DS, __USER_DS);
4501 cpu_x86_load_seg(env, R_ES, __USER_DS);
4502 cpu_x86_load_seg(env, R_FS, __USER_DS);
4503 cpu_x86_load_seg(env, R_GS, __USER_DS);
4504 /* This hack makes Wine work... */
4505 env->segs[R_FS].selector = 0;
4507 cpu_x86_load_seg(env, R_DS, 0);
4508 cpu_x86_load_seg(env, R_ES, 0);
4509 cpu_x86_load_seg(env, R_FS, 0);
4510 cpu_x86_load_seg(env, R_GS, 0);
4512 #elif defined(TARGET_AARCH64)
4516 if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
4518 "The selected ARM CPU does not support 64 bit mode\n");
4522 for (i = 0; i < 31; i++) {
4523 env->xregs[i] = regs->regs[i];
4526 env->xregs[31] = regs->sp;
4528 #elif defined(TARGET_ARM)
4531 cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
4533 for(i = 0; i < 16; i++) {
4534 env->regs[i] = regs->uregs[i];
4536 #ifdef TARGET_WORDS_BIGENDIAN
4538 if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4
4539 && (info->elf_flags & EF_ARM_BE8)) {
4540 env->uncached_cpsr |= CPSR_E;
4541 env->cp15.sctlr_el[1] |= SCTLR_E0E;
4543 env->cp15.sctlr_el[1] |= SCTLR_B;
4547 #elif defined(TARGET_UNICORE32)
4550 cpu_asr_write(env, regs->uregs[32], 0xffffffff);
4551 for (i = 0; i < 32; i++) {
4552 env->regs[i] = regs->uregs[i];
4555 #elif defined(TARGET_SPARC)
4559 env->npc = regs->npc;
4561 for(i = 0; i < 8; i++)
4562 env->gregs[i] = regs->u_regs[i];
4563 for(i = 0; i < 8; i++)
4564 env->regwptr[i] = regs->u_regs[i + 8];
4566 #elif defined(TARGET_PPC)
4570 #if defined(TARGET_PPC64)
4571 #if defined(TARGET_ABI32)
4572 env->msr &= ~((target_ulong)1 << MSR_SF);
4574 env->msr |= (target_ulong)1 << MSR_SF;
4577 env->nip = regs->nip;
4578 for(i = 0; i < 32; i++) {
4579 env->gpr[i] = regs->gpr[i];
4582 #elif defined(TARGET_M68K)
4585 env->dregs[0] = regs->d0;
4586 env->dregs[1] = regs->d1;
4587 env->dregs[2] = regs->d2;
4588 env->dregs[3] = regs->d3;
4589 env->dregs[4] = regs->d4;
4590 env->dregs[5] = regs->d5;
4591 env->dregs[6] = regs->d6;
4592 env->dregs[7] = regs->d7;
4593 env->aregs[0] = regs->a0;
4594 env->aregs[1] = regs->a1;
4595 env->aregs[2] = regs->a2;
4596 env->aregs[3] = regs->a3;
4597 env->aregs[4] = regs->a4;
4598 env->aregs[5] = regs->a5;
4599 env->aregs[6] = regs->a6;
4600 env->aregs[7] = regs->usp;
4602 ts->sim_syscalls = 1;
4604 #elif defined(TARGET_MICROBLAZE)
4606 env->regs[0] = regs->r0;
4607 env->regs[1] = regs->r1;
4608 env->regs[2] = regs->r2;
4609 env->regs[3] = regs->r3;
4610 env->regs[4] = regs->r4;
4611 env->regs[5] = regs->r5;
4612 env->regs[6] = regs->r6;
4613 env->regs[7] = regs->r7;
4614 env->regs[8] = regs->r8;
4615 env->regs[9] = regs->r9;
4616 env->regs[10] = regs->r10;
4617 env->regs[11] = regs->r11;
4618 env->regs[12] = regs->r12;
4619 env->regs[13] = regs->r13;
4620 env->regs[14] = regs->r14;
4621 env->regs[15] = regs->r15;
4622 env->regs[16] = regs->r16;
4623 env->regs[17] = regs->r17;
4624 env->regs[18] = regs->r18;
4625 env->regs[19] = regs->r19;
4626 env->regs[20] = regs->r20;
4627 env->regs[21] = regs->r21;
4628 env->regs[22] = regs->r22;
4629 env->regs[23] = regs->r23;
4630 env->regs[24] = regs->r24;
4631 env->regs[25] = regs->r25;
4632 env->regs[26] = regs->r26;
4633 env->regs[27] = regs->r27;
4634 env->regs[28] = regs->r28;
4635 env->regs[29] = regs->r29;
4636 env->regs[30] = regs->r30;
4637 env->regs[31] = regs->r31;
4638 env->sregs[SR_PC] = regs->pc;
4640 #elif defined(TARGET_MIPS)
4644 for(i = 0; i < 32; i++) {
4645 env->active_tc.gpr[i] = regs->regs[i];
4647 env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1;
4648 if (regs->cp0_epc & 1) {
4649 env->hflags |= MIPS_HFLAG_M16;
4652 #elif defined(TARGET_OPENRISC)
4656 for (i = 0; i < 32; i++) {
4657 env->gpr[i] = regs->gpr[i];
4663 #elif defined(TARGET_SH4)
4667 for(i = 0; i < 16; i++) {
4668 env->gregs[i] = regs->regs[i];
4672 #elif defined(TARGET_ALPHA)
4676 for(i = 0; i < 28; i++) {
4677 env->ir[i] = ((abi_ulong *)regs)[i];
4679 env->ir[IR_SP] = regs->usp;
4682 #elif defined(TARGET_CRIS)
4684 env->regs[0] = regs->r0;
4685 env->regs[1] = regs->r1;
4686 env->regs[2] = regs->r2;
4687 env->regs[3] = regs->r3;
4688 env->regs[4] = regs->r4;
4689 env->regs[5] = regs->r5;
4690 env->regs[6] = regs->r6;
4691 env->regs[7] = regs->r7;
4692 env->regs[8] = regs->r8;
4693 env->regs[9] = regs->r9;
4694 env->regs[10] = regs->r10;
4695 env->regs[11] = regs->r11;
4696 env->regs[12] = regs->r12;
4697 env->regs[13] = regs->r13;
4698 env->regs[14] = info->start_stack;
4699 env->regs[15] = regs->acr;
4700 env->pc = regs->erp;
4702 #elif defined(TARGET_S390X)
4705 for (i = 0; i < 16; i++) {
4706 env->regs[i] = regs->gprs[i];
4708 env->psw.mask = regs->psw.mask;
4709 env->psw.addr = regs->psw.addr;
4711 #elif defined(TARGET_TILEGX)
4714 for (i = 0; i < TILEGX_R_COUNT; i++) {
4715 env->regs[i] = regs->regs[i];
4717 for (i = 0; i < TILEGX_SPR_COUNT; i++) {
4723 #error unsupported target CPU
4726 #if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
4727 ts->stack_base = info->start_stack;
4728 ts->heap_base = info->brk;
4729 /* This will be filled in on the first SYS_HEAPINFO call. */
4734 if (gdbserver_start(gdbstub_port) < 0) {
4735 fprintf(stderr, "qemu: could not open gdbserver on port %d\n",
4739 gdb_handlesig(cpu, 0);