--- /dev/null
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct target_pt_regs {
+ target_long uregs[18];
+};
+
+#define ARM_cpsr uregs[16]
+#define ARM_pc uregs[15]
+#define ARM_lr uregs[14]
+#define ARM_sp uregs[13]
+#define ARM_ip uregs[12]
+#define ARM_fp uregs[11]
+#define ARM_r10 uregs[10]
+#define ARM_r9 uregs[9]
+#define ARM_r8 uregs[8]
+#define ARM_r7 uregs[7]
+#define ARM_r6 uregs[6]
+#define ARM_r5 uregs[5]
+#define ARM_r4 uregs[4]
+#define ARM_r3 uregs[3]
+#define ARM_r2 uregs[2]
+#define ARM_r1 uregs[1]
+#define ARM_r0 uregs[0]
+#define ARM_ORIG_r0 uregs[17]
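+
+/* Usage sketch (illustrative only): the ARM_* macros are applied to a
+   struct target_pt_regs instance, e.g.
+
+       struct target_pt_regs regs;
+       target_long sp = regs.ARM_sp;   -- expands to regs.uregs[13]
+
+   uregs[17] (ARM_ORIG_r0) keeps the original r0 value so a syscall can be
+   restarted with its first argument intact. */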
+
+#define ARM_SYSCALL_BASE 0x900000
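+
+/* Orientation sketch (not part of the ABI header itself): on ARM OABI the
+   syscall number is encoded in the swi instruction, so an emulator can
+   recover it from the instruction that trapped, e.g.
+
+       uint32_t insn = ldl((void *)(env->regs[15] - 4));
+       int nr = (insn & 0x00ffffff) - ARM_SYSCALL_BASE;
+
+   The names env and ldl are assumptions borrowed from the surrounding
+   emulator code. */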
--- /dev/null
+/*
+ * This file contains the system call numbers.
+ */
+
+#define TARGET_NR_restart_syscall ( 0)
+#define TARGET_NR_exit ( 1)
+#define TARGET_NR_fork ( 2)
+#define TARGET_NR_read ( 3)
+#define TARGET_NR_write ( 4)
+#define TARGET_NR_open ( 5)
+#define TARGET_NR_close ( 6)
+#define TARGET_NR_waitpid ( 7) /* removed */
+#define TARGET_NR_creat ( 8)
+#define TARGET_NR_link ( 9)
+#define TARGET_NR_unlink ( 10)
+#define TARGET_NR_execve ( 11)
+#define TARGET_NR_chdir ( 12)
+#define TARGET_NR_time ( 13)
+#define TARGET_NR_mknod ( 14)
+#define TARGET_NR_chmod ( 15)
+#define TARGET_NR_lchown ( 16)
+#define TARGET_NR_break ( 17) /* removed */
+ /* 18 was sys_stat */
+#define TARGET_NR_lseek ( 19)
+#define TARGET_NR_getpid ( 20)
+#define TARGET_NR_mount ( 21)
+#define TARGET_NR_umount ( 22)
+#define TARGET_NR_setuid ( 23)
+#define TARGET_NR_getuid ( 24)
+#define TARGET_NR_stime ( 25)
+#define TARGET_NR_ptrace ( 26)
+#define TARGET_NR_alarm ( 27)
+
+#define TARGET_NR_pause ( 29)
+#define TARGET_NR_utime ( 30)
+#define TARGET_NR_stty ( 31) /* removed */
+#define TARGET_NR_gtty ( 32) /* removed */
+#define TARGET_NR_access ( 33)
+#define TARGET_NR_nice ( 34)
+#define TARGET_NR_ftime ( 35) /* removed */
+#define TARGET_NR_sync ( 36)
+#define TARGET_NR_kill ( 37)
+#define TARGET_NR_rename ( 38)
+#define TARGET_NR_mkdir ( 39)
+#define TARGET_NR_rmdir ( 40)
+#define TARGET_NR_dup ( 41)
+#define TARGET_NR_pipe ( 42)
+#define TARGET_NR_times ( 43)
+#define TARGET_NR_prof ( 44) /* removed */
+#define TARGET_NR_brk ( 45)
+#define TARGET_NR_setgid ( 46)
+#define TARGET_NR_getgid ( 47)
+#define TARGET_NR_signal ( 48) /* removed */
+#define TARGET_NR_geteuid ( 49)
+#define TARGET_NR_getegid ( 50)
+#define TARGET_NR_acct ( 51)
+#define TARGET_NR_umount2 ( 52)
+#define TARGET_NR_lock ( 53) /* removed */
+#define TARGET_NR_ioctl ( 54)
+#define TARGET_NR_fcntl ( 55)
+#define TARGET_NR_mpx ( 56) /* removed */
+#define TARGET_NR_setpgid ( 57)
+#define TARGET_NR_ulimit ( 58) /* removed */
+ /* 59 was sys_olduname */
+#define TARGET_NR_umask ( 60)
+#define TARGET_NR_chroot ( 61)
+#define TARGET_NR_ustat ( 62)
+#define TARGET_NR_dup2 ( 63)
+#define TARGET_NR_getppid ( 64)
+#define TARGET_NR_getpgrp ( 65)
+#define TARGET_NR_setsid ( 66)
+#define TARGET_NR_sigaction ( 67)
+#define TARGET_NR_sgetmask ( 68) /* removed */
+#define TARGET_NR_ssetmask ( 69) /* removed */
+#define TARGET_NR_setreuid ( 70)
+#define TARGET_NR_setregid ( 71)
+#define TARGET_NR_sigsuspend ( 72)
+#define TARGET_NR_sigpending ( 73)
+#define TARGET_NR_sethostname ( 74)
+#define TARGET_NR_setrlimit ( 75)
+#define TARGET_NR_getrlimit ( 76) /* Back compat 2GB limited rlimit */
+#define TARGET_NR_getrusage ( 77)
+#define TARGET_NR_gettimeofday ( 78)
+#define TARGET_NR_settimeofday ( 79)
+#define TARGET_NR_getgroups ( 80)
+#define TARGET_NR_setgroups ( 81)
+#define TARGET_NR_select ( 82)
+#define TARGET_NR_symlink ( 83)
+ /* 84 was sys_lstat */
+#define TARGET_NR_readlink ( 85)
+#define TARGET_NR_uselib ( 86)
+#define TARGET_NR_swapon ( 87)
+#define TARGET_NR_reboot ( 88)
+#define TARGET_NR_readdir ( 89)
+#define TARGET_NR_mmap ( 90)
+#define TARGET_NR_munmap ( 91)
+#define TARGET_NR_truncate ( 92)
+#define TARGET_NR_ftruncate ( 93)
+#define TARGET_NR_fchmod ( 94)
+#define TARGET_NR_fchown ( 95)
+#define TARGET_NR_getpriority ( 96)
+#define TARGET_NR_setpriority ( 97)
+#define TARGET_NR_profil ( 98) /* removed */
+#define TARGET_NR_statfs ( 99)
+#define TARGET_NR_fstatfs (100)
+#define TARGET_NR_ioperm (101)
+#define TARGET_NR_socketcall (102)
+#define TARGET_NR_syslog (103)
+#define TARGET_NR_setitimer (104)
+#define TARGET_NR_getitimer (105)
+#define TARGET_NR_stat (106)
+#define TARGET_NR_lstat (107)
+#define TARGET_NR_fstat (108)
+ /* 109 was sys_uname */
+ /* 110 was sys_iopl */
+#define TARGET_NR_vhangup (111)
+#define TARGET_NR_idle (112)
+#define TARGET_NR_syscall (113) /* syscall to call a syscall! */
+#define TARGET_NR_wait4 (114)
+#define TARGET_NR_swapoff (115)
+#define TARGET_NR_sysinfo (116)
+#define TARGET_NR_ipc (117)
+#define TARGET_NR_fsync (118)
+#define TARGET_NR_sigreturn (119)
+#define TARGET_NR_clone (120)
+#define TARGET_NR_setdomainname (121)
+#define TARGET_NR_uname (122)
+#define TARGET_NR_modify_ldt (123)
+#define TARGET_NR_adjtimex (124)
+#define TARGET_NR_mprotect (125)
+#define TARGET_NR_sigprocmask (126)
+#define TARGET_NR_create_module (127) /* removed */
+#define TARGET_NR_init_module (128)
+#define TARGET_NR_delete_module (129)
+#define TARGET_NR_get_kernel_syms (130) /* removed */
+#define TARGET_NR_quotactl (131)
+#define TARGET_NR_getpgid (132)
+#define TARGET_NR_fchdir (133)
+#define TARGET_NR_bdflush (134)
+#define TARGET_NR_sysfs (135)
+#define TARGET_NR_personality (136)
+#define TARGET_NR_afs_syscall (137) /* Syscall for Andrew File System */
+#define TARGET_NR_setfsuid (138)
+#define TARGET_NR_setfsgid (139)
+#define TARGET_NR__llseek (140)
+#define TARGET_NR_getdents (141)
+#define TARGET_NR__newselect (142)
+#define TARGET_NR_flock (143)
+#define TARGET_NR_msync (144)
+#define TARGET_NR_readv (145)
+#define TARGET_NR_writev (146)
+#define TARGET_NR_getsid (147)
+#define TARGET_NR_fdatasync (148)
+#define TARGET_NR__sysctl (149)
+#define TARGET_NR_mlock (150)
+#define TARGET_NR_munlock (151)
+#define TARGET_NR_mlockall (152)
+#define TARGET_NR_munlockall (153)
+#define TARGET_NR_sched_setparam (154)
+#define TARGET_NR_sched_getparam (155)
+#define TARGET_NR_sched_setscheduler (156)
+#define TARGET_NR_sched_getscheduler (157)
+#define TARGET_NR_sched_yield (158)
+#define TARGET_NR_sched_get_priority_max (159)
+#define TARGET_NR_sched_get_priority_min (160)
+#define TARGET_NR_sched_rr_get_interval (161)
+#define TARGET_NR_nanosleep (162)
+#define TARGET_NR_mremap (163)
+#define TARGET_NR_setresuid (164)
+#define TARGET_NR_getresuid (165)
+#define TARGET_NR_vm86 (166) /* removed */
+#define TARGET_NR_query_module (167) /* removed */
+#define TARGET_NR_poll (168)
+#define TARGET_NR_nfsservctl (169)
+#define TARGET_NR_setresgid (170)
+#define TARGET_NR_getresgid (171)
+#define TARGET_NR_prctl (172)
+#define TARGET_NR_rt_sigreturn (173)
+#define TARGET_NR_rt_sigaction (174)
+#define TARGET_NR_rt_sigprocmask (175)
+#define TARGET_NR_rt_sigpending (176)
+#define TARGET_NR_rt_sigtimedwait (177)
+#define TARGET_NR_rt_sigqueueinfo (178)
+#define TARGET_NR_rt_sigsuspend (179)
+#define TARGET_NR_pread (180)
+#define TARGET_NR_pwrite (181)
+#define TARGET_NR_chown (182)
+#define TARGET_NR_getcwd (183)
+#define TARGET_NR_capget (184)
+#define TARGET_NR_capset (185)
+#define TARGET_NR_sigaltstack (186)
+#define TARGET_NR_sendfile (187)
+ /* 188 reserved */
+ /* 189 reserved */
+#define TARGET_NR_vfork (190)
+#define TARGET_NR_ugetrlimit (191) /* SuS compliant getrlimit */
+#define TARGET_NR_mmap2 (192)
+#define TARGET_NR_truncate64 (193)
+#define TARGET_NR_ftruncate64 (194)
+#define TARGET_NR_stat64 (195)
+#define TARGET_NR_lstat64 (196)
+#define TARGET_NR_fstat64 (197)
+#define TARGET_NR_lchown32 (198)
+#define TARGET_NR_getuid32 (199)
+#define TARGET_NR_getgid32 (200)
+#define TARGET_NR_geteuid32 (201)
+#define TARGET_NR_getegid32 (202)
+#define TARGET_NR_setreuid32 (203)
+#define TARGET_NR_setregid32 (204)
+#define TARGET_NR_getgroups32 (205)
+#define TARGET_NR_setgroups32 (206)
+#define TARGET_NR_fchown32 (207)
+#define TARGET_NR_setresuid32 (208)
+#define TARGET_NR_getresuid32 (209)
+#define TARGET_NR_setresgid32 (210)
+#define TARGET_NR_getresgid32 (211)
+#define TARGET_NR_chown32 (212)
+#define TARGET_NR_setuid32 (213)
+#define TARGET_NR_setgid32 (214)
+#define TARGET_NR_setfsuid32 (215)
+#define TARGET_NR_setfsgid32 (216)
+#define TARGET_NR_getdents64 (217)
+#define TARGET_NR_pivot_root (218)
+#define TARGET_NR_mincore (219)
+#define TARGET_NR_madvise (220)
+#define TARGET_NR_fcntl64 (221)
+ /* 222 for tux */
+ /* 223 is unused */
+#define TARGET_NR_gettid (224)
+#define TARGET_NR_readahead (225)
+#define TARGET_NR_setxattr (226)
+#define TARGET_NR_lsetxattr (227)
+#define TARGET_NR_fsetxattr (228)
+#define TARGET_NR_getxattr (229)
+#define TARGET_NR_lgetxattr (230)
+#define TARGET_NR_fgetxattr (231)
+#define TARGET_NR_listxattr (232)
+#define TARGET_NR_llistxattr (233)
+#define TARGET_NR_flistxattr (234)
+#define TARGET_NR_removexattr (235)
+#define TARGET_NR_lremovexattr (236)
+#define TARGET_NR_fremovexattr (237)
+#define TARGET_NR_tkill (238)
+#define TARGET_NR_sendfile64 (239)
+#define TARGET_NR_futex (240)
+#define TARGET_NR_sched_setaffinity (241)
+#define TARGET_NR_sched_getaffinity (242)
+#define TARGET_NR_io_setup (243)
+#define TARGET_NR_io_destroy (244)
+#define TARGET_NR_io_getevents (245)
+#define TARGET_NR_io_submit (246)
+#define TARGET_NR_io_cancel (247)
+#define TARGET_NR_exit_group (248)
+#define TARGET_NR_lookup_dcookie (249)
+#define TARGET_NR_epoll_create (250)
+#define TARGET_NR_epoll_ctl (251)
+#define TARGET_NR_epoll_wait (252)
+#define TARGET_NR_remap_file_pages (253)
+ /* 254 for set_thread_area */
+ /* 255 for get_thread_area */
+ /* 256 for set_tid_address */
--- /dev/null
+/* default linux values for the selectors */
+#define __USER_CS (0x23)
+#define __USER_DS (0x2B)
+
+struct target_pt_regs {
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ int xds;
+ int xes;
+ long orig_eax;
+ long eip;
+ int xcs;
+ long eflags;
+ long esp;
+ int xss;
+};
+
+/* ioctls */
+
+#define TARGET_LDT_ENTRIES 8192
+#define TARGET_LDT_ENTRY_SIZE 8
+
+#define TARGET_GDT_ENTRY_TLS_ENTRIES 3
+#define TARGET_GDT_ENTRY_TLS_MIN 6
+#define TARGET_GDT_ENTRY_TLS_MAX (TARGET_GDT_ENTRY_TLS_MIN + TARGET_GDT_ENTRY_TLS_ENTRIES - 1)
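+
+/* With the values above, the three TLS descriptors occupy GDT entries
+   6..8: TARGET_GDT_ENTRY_TLS_MAX evaluates to 6 + 3 - 1 = 8. */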
+
+struct target_modify_ldt_ldt_s {
+ unsigned int entry_number;
+ target_ulong base_addr;
+ unsigned int limit;
+ unsigned int flags;
+};
+
+/* vm86 defines */
+
+#define TARGET_BIOSSEG 0x0f000
+
+#define TARGET_CPU_086 0
+#define TARGET_CPU_186 1
+#define TARGET_CPU_286 2
+#define TARGET_CPU_386 3
+#define TARGET_CPU_486 4
+#define TARGET_CPU_586 5
+
+#define TARGET_VM86_SIGNAL 0 /* return due to signal */
+#define TARGET_VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */
+#define TARGET_VM86_INTx 2 /* int3/int x instruction (ARG = x) */
+#define TARGET_VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */
+
+/*
+ * Additional return values when invoking new vm86()
+ */
+#define TARGET_VM86_PICRETURN 4 /* return due to pending PIC request */
+#define TARGET_VM86_TRAP 6 /* return due to DOS-debugger request */
+
+/*
+ * function codes when invoking new vm86()
+ */
+#define TARGET_VM86_PLUS_INSTALL_CHECK 0
+#define TARGET_VM86_ENTER 1
+#define TARGET_VM86_ENTER_NO_BYPASS 2
+#define TARGET_VM86_REQUEST_IRQ 3
+#define TARGET_VM86_FREE_IRQ 4
+#define TARGET_VM86_GET_IRQ_BITS 5
+#define TARGET_VM86_GET_AND_RESET_IRQ 6
+
+/*
+ * This is the stack-layout seen by the user space program when we have
+ * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
+ * is 'kernel_vm86_regs' (see below).
+ */
+
+struct target_vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ target_long ebx;
+ target_long ecx;
+ target_long edx;
+ target_long esi;
+ target_long edi;
+ target_long ebp;
+ target_long eax;
+ target_long __null_ds;
+ target_long __null_es;
+ target_long __null_fs;
+ target_long __null_gs;
+ target_long orig_eax;
+ target_long eip;
+ unsigned short cs, __csh;
+ target_long eflags;
+ target_long esp;
+ unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct target_revectored_struct {
+ target_ulong __map[8]; /* 256 bits */
+};
+
+struct target_vm86_struct {
+ struct target_vm86_regs regs;
+ target_ulong flags;
+ target_ulong screen_bitmap;
+ target_ulong cpu_type;
+ struct target_revectored_struct int_revectored;
+ struct target_revectored_struct int21_revectored;
+};
+
+/*
+ * flags masks
+ */
+#define TARGET_VM86_SCREEN_BITMAP 0x0001
+
+struct target_vm86plus_info_struct {
+ target_ulong flags;
+#define TARGET_force_return_for_pic (1 << 0)
+#define TARGET_vm86dbg_active (1 << 1) /* for debugger */
+#define TARGET_vm86dbg_TFpendig (1 << 2) /* for debugger */
+#define TARGET_is_vm86pus (1 << 31) /* for vm86 internal use */
+ unsigned char vm86dbg_intxxtab[32]; /* for debugger */
+};
+
+struct target_vm86plus_struct {
+ struct target_vm86_regs regs;
+ target_ulong flags;
+ target_ulong screen_bitmap;
+ target_ulong cpu_type;
+ struct target_revectored_struct int_revectored;
+ struct target_revectored_struct int21_revectored;
+ struct target_vm86plus_info_struct vm86plus;
+};
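+
+/* Orientation note (host Linux API, stated as an assumption): the classic
+   entry points taking these structures are vm86old(struct vm86_struct *)
+   and vm86(unsigned long fn, struct vm86plus_struct *), where fn is one
+   of the TARGET_VM86_* function codes above. */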
+
+/* ipcs */
+
+#define TARGET_SEMOP 1
+#define TARGET_SEMGET 2
+#define TARGET_SEMCTL 3
+#define TARGET_MSGSND 11
+#define TARGET_MSGRCV 12
+#define TARGET_MSGGET 13
+#define TARGET_MSGCTL 14
+#define TARGET_SHMAT 21
+#define TARGET_SHMDT 22
+#define TARGET_SHMGET 23
+#define TARGET_SHMCTL 24
+
+struct target_msgbuf {
+ int mtype;
+ char mtext[1];
+};
+
+struct target_ipc_kludge {
+ unsigned int msgp; /* Really (struct msgbuf *) */
+ int msgtyp;
+};
+
+struct target_ipc_perm {
+ int key;
+ unsigned short uid;
+ unsigned short gid;
+ unsigned short cuid;
+ unsigned short cgid;
+ unsigned short mode;
+ unsigned short seq;
+};
+
+struct target_msqid_ds {
+ struct target_ipc_perm msg_perm;
+ unsigned int msg_first; /* really struct target_msg* */
+ unsigned int msg_last; /* really struct target_msg* */
+ unsigned int msg_stime; /* really target_time_t */
+ unsigned int msg_rtime; /* really target_time_t */
+ unsigned int msg_ctime; /* really target_time_t */
+ unsigned int wwait; /* really struct wait_queue* */
+ unsigned int rwait; /* really struct wait_queue* */
+ unsigned short msg_cbytes;
+ unsigned short msg_qnum;
+ unsigned short msg_qbytes;
+ unsigned short msg_lspid;
+ unsigned short msg_lrpid;
+};
+
+struct target_shmid_ds {
+ struct target_ipc_perm shm_perm;
+ int shm_segsz;
+ unsigned int shm_atime; /* really target_time_t */
+ unsigned int shm_dtime; /* really target_time_t */
+ unsigned int shm_ctime; /* really target_time_t */
+ unsigned short shm_cpid;
+ unsigned short shm_lpid;
+ short shm_nattch;
+ unsigned short shm_npages;
+ unsigned long *shm_pages;
+ void *attaches; /* really struct shm_desc * */
+};
+
+#define TARGET_IPC_RMID 0
+#define TARGET_IPC_SET 1
+#define TARGET_IPC_STAT 2
+
+union target_semun {
+ int val;
+ unsigned int buf; /* really struct semid_ds * */
+ unsigned int array; /* really unsigned short * */
+ unsigned int __buf; /* really struct seminfo * */
+ unsigned int __pad; /* really void* */
+};
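+
+/* Orientation sketch (assumption, mirrors the Linux ipc(2) multiplexer):
+   the TARGET_SEMxxx/MSGxxx/SHMxxx calls above arrive through the single
+   ipc syscall
+
+       ipc(unsigned int call, int first, int second, int third,
+           void *ptr, long fifth);
+
+   e.g. semop(semid, sops, nsops) corresponds to
+   ipc(TARGET_SEMOP, semid, nsops, 0, sops, 0). */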
+
--- /dev/null
+/*
+ * This file contains the system call numbers.
+ */
+
+#define TARGET_NR_restart_syscall 0
+#define TARGET_NR_exit 1
+#define TARGET_NR_fork 2
+#define TARGET_NR_read 3
+#define TARGET_NR_write 4
+#define TARGET_NR_open 5
+#define TARGET_NR_close 6
+#define TARGET_NR_waitpid 7
+#define TARGET_NR_creat 8
+#define TARGET_NR_link 9
+#define TARGET_NR_unlink 10
+#define TARGET_NR_execve 11
+#define TARGET_NR_chdir 12
+#define TARGET_NR_time 13
+#define TARGET_NR_mknod 14
+#define TARGET_NR_chmod 15
+#define TARGET_NR_lchown 16
+#define TARGET_NR_break 17
+#define TARGET_NR_oldstat 18
+#define TARGET_NR_lseek 19
+#define TARGET_NR_getpid 20
+#define TARGET_NR_mount 21
+#define TARGET_NR_umount 22
+#define TARGET_NR_setuid 23
+#define TARGET_NR_getuid 24
+#define TARGET_NR_stime 25
+#define TARGET_NR_ptrace 26
+#define TARGET_NR_alarm 27
+#define TARGET_NR_oldfstat 28
+#define TARGET_NR_pause 29
+#define TARGET_NR_utime 30
+#define TARGET_NR_stty 31
+#define TARGET_NR_gtty 32
+#define TARGET_NR_access 33
+#define TARGET_NR_nice 34
+#define TARGET_NR_ftime 35
+#define TARGET_NR_sync 36
+#define TARGET_NR_kill 37
+#define TARGET_NR_rename 38
+#define TARGET_NR_mkdir 39
+#define TARGET_NR_rmdir 40
+#define TARGET_NR_dup 41
+#define TARGET_NR_pipe 42
+#define TARGET_NR_times 43
+#define TARGET_NR_prof 44
+#define TARGET_NR_brk 45
+#define TARGET_NR_setgid 46
+#define TARGET_NR_getgid 47
+#define TARGET_NR_signal 48
+#define TARGET_NR_geteuid 49
+#define TARGET_NR_getegid 50
+#define TARGET_NR_acct 51
+#define TARGET_NR_umount2 52
+#define TARGET_NR_lock 53
+#define TARGET_NR_ioctl 54
+#define TARGET_NR_fcntl 55
+#define TARGET_NR_mpx 56
+#define TARGET_NR_setpgid 57
+#define TARGET_NR_ulimit 58
+#define TARGET_NR_oldolduname 59
+#define TARGET_NR_umask 60
+#define TARGET_NR_chroot 61
+#define TARGET_NR_ustat 62
+#define TARGET_NR_dup2 63
+#define TARGET_NR_getppid 64
+#define TARGET_NR_getpgrp 65
+#define TARGET_NR_setsid 66
+#define TARGET_NR_sigaction 67
+#define TARGET_NR_sgetmask 68
+#define TARGET_NR_ssetmask 69
+#define TARGET_NR_setreuid 70
+#define TARGET_NR_setregid 71
+#define TARGET_NR_sigsuspend 72
+#define TARGET_NR_sigpending 73
+#define TARGET_NR_sethostname 74
+#define TARGET_NR_setrlimit 75
+#define TARGET_NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
+#define TARGET_NR_getrusage 77
+#define TARGET_NR_gettimeofday 78
+#define TARGET_NR_settimeofday 79
+#define TARGET_NR_getgroups 80
+#define TARGET_NR_setgroups 81
+#define TARGET_NR_select 82
+#define TARGET_NR_symlink 83
+#define TARGET_NR_oldlstat 84
+#define TARGET_NR_readlink 85
+#define TARGET_NR_uselib 86
+#define TARGET_NR_swapon 87
+#define TARGET_NR_reboot 88
+#define TARGET_NR_readdir 89
+#define TARGET_NR_mmap 90
+#define TARGET_NR_munmap 91
+#define TARGET_NR_truncate 92
+#define TARGET_NR_ftruncate 93
+#define TARGET_NR_fchmod 94
+#define TARGET_NR_fchown 95
+#define TARGET_NR_getpriority 96
+#define TARGET_NR_setpriority 97
+#define TARGET_NR_profil 98
+#define TARGET_NR_statfs 99
+#define TARGET_NR_fstatfs 100
+#define TARGET_NR_ioperm 101
+#define TARGET_NR_socketcall 102
+#define TARGET_NR_syslog 103
+#define TARGET_NR_setitimer 104
+#define TARGET_NR_getitimer 105
+#define TARGET_NR_stat 106
+#define TARGET_NR_lstat 107
+#define TARGET_NR_fstat 108
+#define TARGET_NR_olduname 109
+#define TARGET_NR_iopl 110
+#define TARGET_NR_vhangup 111
+#define TARGET_NR_idle 112
+#define TARGET_NR_vm86old 113
+#define TARGET_NR_wait4 114
+#define TARGET_NR_swapoff 115
+#define TARGET_NR_sysinfo 116
+#define TARGET_NR_ipc 117
+#define TARGET_NR_fsync 118
+#define TARGET_NR_sigreturn 119
+#define TARGET_NR_clone 120
+#define TARGET_NR_setdomainname 121
+#define TARGET_NR_uname 122
+#define TARGET_NR_modify_ldt 123
+#define TARGET_NR_adjtimex 124
+#define TARGET_NR_mprotect 125
+#define TARGET_NR_sigprocmask 126
+#define TARGET_NR_create_module 127
+#define TARGET_NR_init_module 128
+#define TARGET_NR_delete_module 129
+#define TARGET_NR_get_kernel_syms 130
+#define TARGET_NR_quotactl 131
+#define TARGET_NR_getpgid 132
+#define TARGET_NR_fchdir 133
+#define TARGET_NR_bdflush 134
+#define TARGET_NR_sysfs 135
+#define TARGET_NR_personality 136
+#define TARGET_NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define TARGET_NR_setfsuid 138
+#define TARGET_NR_setfsgid 139
+#define TARGET_NR__llseek 140
+#define TARGET_NR_getdents 141
+#define TARGET_NR__newselect 142
+#define TARGET_NR_flock 143
+#define TARGET_NR_msync 144
+#define TARGET_NR_readv 145
+#define TARGET_NR_writev 146
+#define TARGET_NR_getsid 147
+#define TARGET_NR_fdatasync 148
+#define TARGET_NR__sysctl 149
+#define TARGET_NR_mlock 150
+#define TARGET_NR_munlock 151
+#define TARGET_NR_mlockall 152
+#define TARGET_NR_munlockall 153
+#define TARGET_NR_sched_setparam 154
+#define TARGET_NR_sched_getparam 155
+#define TARGET_NR_sched_setscheduler 156
+#define TARGET_NR_sched_getscheduler 157
+#define TARGET_NR_sched_yield 158
+#define TARGET_NR_sched_get_priority_max 159
+#define TARGET_NR_sched_get_priority_min 160
+#define TARGET_NR_sched_rr_get_interval 161
+#define TARGET_NR_nanosleep 162
+#define TARGET_NR_mremap 163
+#define TARGET_NR_setresuid 164
+#define TARGET_NR_getresuid 165
+#define TARGET_NR_vm86 166
+#define TARGET_NR_query_module 167
+#define TARGET_NR_poll 168
+#define TARGET_NR_nfsservctl 169
+#define TARGET_NR_setresgid 170
+#define TARGET_NR_getresgid 171
+#define TARGET_NR_prctl 172
+#define TARGET_NR_rt_sigreturn 173
+#define TARGET_NR_rt_sigaction 174
+#define TARGET_NR_rt_sigprocmask 175
+#define TARGET_NR_rt_sigpending 176
+#define TARGET_NR_rt_sigtimedwait 177
+#define TARGET_NR_rt_sigqueueinfo 178
+#define TARGET_NR_rt_sigsuspend 179
+#define TARGET_NR_pread 180
+#define TARGET_NR_pwrite 181
+#define TARGET_NR_chown 182
+#define TARGET_NR_getcwd 183
+#define TARGET_NR_capget 184
+#define TARGET_NR_capset 185
+#define TARGET_NR_sigaltstack 186
+#define TARGET_NR_sendfile 187
+#define TARGET_NR_getpmsg 188 /* some people actually want streams */
+#define TARGET_NR_putpmsg 189 /* some people actually want streams */
+#define TARGET_NR_vfork 190
+#define TARGET_NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define TARGET_NR_mmap2 192
+#define TARGET_NR_truncate64 193
+#define TARGET_NR_ftruncate64 194
+#define TARGET_NR_stat64 195
+#define TARGET_NR_lstat64 196
+#define TARGET_NR_fstat64 197
+#define TARGET_NR_lchown32 198
+#define TARGET_NR_getuid32 199
+#define TARGET_NR_getgid32 200
+#define TARGET_NR_geteuid32 201
+#define TARGET_NR_getegid32 202
+#define TARGET_NR_setreuid32 203
+#define TARGET_NR_setregid32 204
+#define TARGET_NR_getgroups32 205
+#define TARGET_NR_setgroups32 206
+#define TARGET_NR_fchown32 207
+#define TARGET_NR_setresuid32 208
+#define TARGET_NR_getresuid32 209
+#define TARGET_NR_setresgid32 210
+#define TARGET_NR_getresgid32 211
+#define TARGET_NR_chown32 212
+#define TARGET_NR_setuid32 213
+#define TARGET_NR_setgid32 214
+#define TARGET_NR_setfsuid32 215
+#define TARGET_NR_setfsgid32 216
+#define TARGET_NR_pivot_root 217
+#define TARGET_NR_mincore 218
+#define TARGET_NR_madvise 219
+#define TARGET_NR_madvise1 219 /* delete when C lib stub is removed */
+#define TARGET_NR_getdents64 220
+#define TARGET_NR_fcntl64 221
+/* 223 is unused */
+#define TARGET_NR_gettid 224
+#define TARGET_NR_readahead 225
+#define TARGET_NR_setxattr 226
+#define TARGET_NR_lsetxattr 227
+#define TARGET_NR_fsetxattr 228
+#define TARGET_NR_getxattr 229
+#define TARGET_NR_lgetxattr 230
+#define TARGET_NR_fgetxattr 231
+#define TARGET_NR_listxattr 232
+#define TARGET_NR_llistxattr 233
+#define TARGET_NR_flistxattr 234
+#define TARGET_NR_removexattr 235
+#define TARGET_NR_lremovexattr 236
+#define TARGET_NR_fremovexattr 237
+#define TARGET_NR_tkill 238
+#define TARGET_NR_sendfile64 239
+#define TARGET_NR_futex 240
+#define TARGET_NR_sched_setaffinity 241
+#define TARGET_NR_sched_getaffinity 242
+#define TARGET_NR_set_thread_area 243
+#define TARGET_NR_get_thread_area 244
+#define TARGET_NR_io_setup 245
+#define TARGET_NR_io_destroy 246
+#define TARGET_NR_io_getevents 247
+#define TARGET_NR_io_submit 248
+#define TARGET_NR_io_cancel 249
+#define TARGET_NR_fadvise64 250
+
+#define TARGET_NR_exit_group 252
+#define TARGET_NR_lookup_dcookie 253
+#define TARGET_NR_epoll_create 254
+#define TARGET_NR_epoll_ctl 255
+#define TARGET_NR_epoll_wait 256
+#define TARGET_NR_remap_file_pages 257
+#define TARGET_NR_set_tid_address 258
+#define TARGET_NR_timer_create 259
+#define TARGET_NR_timer_settime (TARGET_NR_timer_create+1)
+#define TARGET_NR_timer_gettime (TARGET_NR_timer_create+2)
+#define TARGET_NR_timer_getoverrun (TARGET_NR_timer_create+3)
+#define TARGET_NR_timer_delete (TARGET_NR_timer_create+4)
+#define TARGET_NR_clock_settime (TARGET_NR_timer_create+5)
+#define TARGET_NR_clock_gettime (TARGET_NR_timer_create+6)
+#define TARGET_NR_clock_getres (TARGET_NR_timer_create+7)
+#define TARGET_NR_clock_nanosleep (TARGET_NR_timer_create+8)
+
--- /dev/null
+/*
+ * ARM virtual CPU header
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef CPU_ARM_H
+#define CPU_ARM_H
+
+#include "cpu-defs.h"
+
+#define EXCP_UDEF 1 /* undefined instruction */
+#define EXCP_SWI 2 /* software interrupt */
+
+typedef struct CPUARMState {
+ uint32_t regs[16];
+ uint32_t cpsr;
+
+ /* cpsr flag cache for faster execution */
+ uint32_t CF; /* 0 or 1 */
+ uint32_t VF; /* V is bit 31. All other bits are undefined */
+ uint32_t NZF; /* N is bit 31. Z is computed from NZF */
+
+ /* exception/interrupt handling */
+ jmp_buf jmp_env;
+ int exception_index;
+ int interrupt_request;
+ struct TranslationBlock *current_tb;
+ int user_mode_only;
+
+ /* user data */
+ void *opaque;
+} CPUARMState;
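+
+/* Flag cache sketch: NZF holds the last flag-setting result (N is its
+   bit 31, Z is "NZF == 0"), CF is 0 or 1, and VF keeps V in bit 31.
+   Worked example: a flag-setting add of 0x80000000 + 0x80000000 leaves
+   NZF = 0 (Z set), CF = 1 (carry out) and VF bit 31 set (overflow).
+   This encoding is also why N = 1 together with Z = 1 cannot be
+   represented: Z = 1 forces NZF = 0, which clears N. */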
+
+CPUARMState *cpu_arm_init(void);
+int cpu_arm_exec(CPUARMState *s);
+void cpu_arm_close(CPUARMState *s);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. A non-zero
+ value is returned if the signal was handled by the virtual CPU. */
+struct siginfo;
+int cpu_arm_signal_handler(int host_signum, struct siginfo *info,
+ void *puc);
+
+void cpu_arm_dump_state(CPUARMState *env, FILE *f, int flags);
+
+#define TARGET_PAGE_BITS 12
+#include "cpu-all.h"
+
+#endif
--- /dev/null
+/*
+ * ARM execution defines
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "dyngen-exec.h"
+
+register struct CPUARMState *env asm(AREG0);
+register uint32_t T0 asm(AREG1);
+register uint32_t T1 asm(AREG2);
+register uint32_t T2 asm(AREG3);
+
+#include "cpu.h"
+#include "exec-all.h"
+
+void cpu_lock(void);
+void cpu_unlock(void);
+void cpu_loop_exit(void);
+
+static inline int compute_cpsr(void)
+{
+ int ZF;
+ ZF = (env->NZF == 0);
+ return env->cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
+ (env->CF << 29) | ((env->VF & 0x80000000) >> 3);
+}
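+
+/* Worked example: with NZF = 0, CF = 1 and VF bit 31 set, compute_cpsr()
+   returns the base cpsr with Z (bit 30), C (bit 29) and V (bit 28) set
+   and N (bit 31) clear. */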
--- /dev/null
+/*
+ * ARM micro operations
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "exec.h"
+
+#define REGNAME r0
+#define REG (env->regs[0])
+#include "op_template.h"
+
+#define REGNAME r1
+#define REG (env->regs[1])
+#include "op_template.h"
+
+#define REGNAME r2
+#define REG (env->regs[2])
+#include "op_template.h"
+
+#define REGNAME r3
+#define REG (env->regs[3])
+#include "op_template.h"
+
+#define REGNAME r4
+#define REG (env->regs[4])
+#include "op_template.h"
+
+#define REGNAME r5
+#define REG (env->regs[5])
+#include "op_template.h"
+
+#define REGNAME r6
+#define REG (env->regs[6])
+#include "op_template.h"
+
+#define REGNAME r7
+#define REG (env->regs[7])
+#include "op_template.h"
+
+#define REGNAME r8
+#define REG (env->regs[8])
+#include "op_template.h"
+
+#define REGNAME r9
+#define REG (env->regs[9])
+#include "op_template.h"
+
+#define REGNAME r10
+#define REG (env->regs[10])
+#include "op_template.h"
+
+#define REGNAME r11
+#define REG (env->regs[11])
+#include "op_template.h"
+
+#define REGNAME r12
+#define REG (env->regs[12])
+#include "op_template.h"
+
+#define REGNAME r13
+#define REG (env->regs[13])
+#include "op_template.h"
+
+#define REGNAME r14
+#define REG (env->regs[14])
+#include "op_template.h"
+
+#define REGNAME r15
+#define REG (env->regs[15])
+#include "op_template.h"
+
+void OPPROTO op_movl_T0_0(void)
+{
+ T0 = 0;
+}
+
+void OPPROTO op_movl_T0_im(void)
+{
+ T0 = PARAM1;
+}
+
+void OPPROTO op_movl_T1_im(void)
+{
+ T1 = PARAM1;
+}
+
+void OPPROTO op_movl_T2_im(void)
+{
+ T2 = PARAM1;
+}
+
+void OPPROTO op_addl_T1_im(void)
+{
+ T1 += PARAM1;
+}
+
+void OPPROTO op_addl_T1_T2(void)
+{
+ T1 += T2;
+}
+
+void OPPROTO op_subl_T1_T2(void)
+{
+ T1 -= T2;
+}
+
+void OPPROTO op_addl_T0_T1(void)
+{
+ T0 += T1;
+}
+
+void OPPROTO op_addl_T0_T1_cc(void)
+{
+ unsigned int src1;
+ src1 = T0;
+ T0 += T1;
+ env->NZF = T0;
+ env->CF = T0 < src1;
+ env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
+}
+
+void OPPROTO op_adcl_T0_T1(void)
+{
+ T0 += T1 + env->CF;
+}
+
+void OPPROTO op_adcl_T0_T1_cc(void)
+{
+ unsigned int src1;
+ src1 = T0;
+ if (!env->CF) {
+ T0 += T1;
+ env->CF = T0 < src1;
+ } else {
+ T0 += T1 + 1;
+ env->CF = T0 <= src1;
+ }
+ env->VF = (src1 ^ T1 ^ -1) & (src1 ^ T0);
+ env->NZF = T0;
+ FORCE_RET();
+}
+
+#define OPSUB(sub, sbc, res, T0, T1) \
+ \
+void OPPROTO op_ ## sub ## l_T0_T1(void) \
+{ \
+ res = T0 - T1; \
+} \
+ \
+void OPPROTO op_ ## sub ## l_T0_T1_cc(void) \
+{ \
+ unsigned int src1; \
+ src1 = T0; \
+ T0 -= T1; \
+ env->NZF = T0; \
+ env->CF = src1 >= T1; \
+ env->VF = (src1 ^ T1) & (src1 ^ T0); \
+ res = T0; \
+} \
+ \
+void OPPROTO op_ ## sbc ## l_T0_T1(void) \
+{ \
+ res = T0 - T1 + env->CF - 1; \
+} \
+ \
+void OPPROTO op_ ## sbc ## l_T0_T1_cc(void) \
+{ \
+ unsigned int src1; \
+ src1 = T0; \
+ if (!env->CF) { \
+ T0 = T0 - T1 - 1; \
+ env->CF = src1 >= T1; \
+ } else { \
+ T0 = T0 - T1; \
+ env->CF = src1 > T1; \
+ } \
+ env->VF = (src1 ^ T1) & (src1 ^ T0); \
+ env->NZF = T0; \
+ res = T0; \
+ FORCE_RET(); \
+}
+
+OPSUB(sub, sbc, T0, T0, T1)
+
+OPSUB(rsb, rsc, T0, T1, T0)
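+
+/* Note on the carry convention encoded above: ARM defines C as NOT-borrow
+   for subtraction, hence env->CF = src1 >= T1. For example, 5 - 3 sets
+   CF = 1 (no borrow) while 3 - 5 sets CF = 0. */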
+
+void OPPROTO op_andl_T0_T1(void)
+{
+ T0 &= T1;
+}
+
+void OPPROTO op_xorl_T0_T1(void)
+{
+ T0 ^= T1;
+}
+
+void OPPROTO op_orl_T0_T1(void)
+{
+ T0 |= T1;
+}
+
+void OPPROTO op_bicl_T0_T1(void)
+{
+ T0 &= ~T1;
+}
+
+void OPPROTO op_notl_T1(void)
+{
+ T1 = ~T1;
+}
+
+void OPPROTO op_logic_T0_cc(void)
+{
+ env->NZF = T0;
+}
+
+void OPPROTO op_logic_T1_cc(void)
+{
+ env->NZF = T1;
+}
+
+#define EIP (env->regs[15])
+
+void OPPROTO op_test_eq(void)
+{
+ if (env->NZF == 0)
+ JUMP_TB(op_test_eq, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_ne(void)
+{
+ if (env->NZF != 0)
+ JUMP_TB(op_test_ne, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_cs(void)
+{
+ if (env->CF != 0)
+ JUMP_TB(op_test_cs, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_cc(void)
+{
+ if (env->CF == 0)
+ JUMP_TB(op_test_cc, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_mi(void)
+{
+ if ((env->NZF & 0x80000000) != 0)
+ JUMP_TB(op_test_mi, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_pl(void)
+{
+ if ((env->NZF & 0x80000000) == 0)
+ JUMP_TB(op_test_pl, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_vs(void)
+{
+ if ((env->VF & 0x80000000) != 0)
+ JUMP_TB(op_test_vs, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_vc(void)
+{
+ if ((env->VF & 0x80000000) == 0)
+ JUMP_TB(op_test_vc, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_hi(void)
+{
+ if (env->CF != 0 && env->NZF != 0)
+ JUMP_TB(op_test_hi, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_ls(void)
+{
+ if (env->CF == 0 || env->NZF == 0)
+ JUMP_TB(op_test_ls, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_ge(void)
+{
+ if (((env->VF ^ env->NZF) & 0x80000000) == 0)
+ JUMP_TB(op_test_ge, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_lt(void)
+{
+ if (((env->VF ^ env->NZF) & 0x80000000) != 0)
+ JUMP_TB(op_test_lt, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_gt(void)
+{
+ if (env->NZF != 0 && ((env->VF ^ env->NZF) & 0x80000000) == 0)
+ JUMP_TB(op_test_gt, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_test_le(void)
+{
+ if (env->NZF == 0 || ((env->VF ^ env->NZF) & 0x80000000) != 0)
+ JUMP_TB(op_test_le, PARAM1, 0, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO op_jmp(void)
+{
+ JUMP_TB(op_jmp, PARAM1, 1, PARAM2);
+}
+
+void OPPROTO op_exit_tb(void)
+{
+ EXIT_TB();
+}
+
+void OPPROTO op_movl_T0_psr(void)
+{
+ T0 = compute_cpsr();
+}
+
+/* NOTE: N = 1 and Z = 1 cannot be stored currently */
+void OPPROTO op_movl_psr_T0(void)
+{
+ unsigned int psr;
+ psr = T0;
+ env->CF = (psr >> 29) & 1;
+ env->NZF = (psr & 0xc0000000) ^ 0x40000000;
+ env->VF = (psr << 3) & 0x80000000;
+ /* for user mode we do not update other state info */
+}
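+
+/* Worked example of the NZF encoding above: for N=0 Z=1 the masked bits
+   are 0x40000000 and the xor yields NZF = 0, which recomputes as Z set;
+   for N=1 Z=1 the xor yields 0x80000000, so Z is lost -- the case the
+   NOTE above refers to. */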
+
+void OPPROTO op_mul_T0_T1(void)
+{
+ T0 = T0 * T1;
+}
+
+/* 64 bit unsigned mul */
+void OPPROTO op_mull_T0_T1(void)
+{
+ uint64_t res;
+ res = (uint64_t)T0 * T1; /* widen before multiplying to keep all 64 bits */
+ T1 = res >> 32;
+ T0 = res;
+}
+
+/* 64 bit signed mul */
+void OPPROTO op_imull_T0_T1(void)
+{
+ uint64_t res;
+ res = (int64_t)(int32_t)T0 * (int32_t)T1; /* sign-extend, then widen to 64 bits */
+ T1 = res >> 32;
+ T0 = res;
+}
+
+void OPPROTO op_addq_T0_T1(void)
+{
+ uint64_t res;
+ res = ((uint64_t)T1 << 32) | T0;
+ res += ((uint64_t)(env->regs[PARAM2]) << 32) | (env->regs[PARAM1]);
+ T1 = res >> 32;
+ T0 = res;
+}
+
+void OPPROTO op_logicq_cc(void)
+{
+ env->NZF = (T1 & 0x80000000) | ((T0 | T1) != 0);
+}
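+
+/* The expression above folds the 64-bit flags into the 32-bit NZF cache:
+   N comes from bit 31 of the high word, and any nonzero bit in either
+   half makes NZF nonzero, so Z is still computed correctly as
+   (NZF == 0). */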
+
+/* memory access */
+
+void OPPROTO op_ldub_T0_T1(void)
+{
+ T0 = ldub((void *)T1);
+}
+
+void OPPROTO op_ldsb_T0_T1(void)
+{
+ T0 = ldsb((void *)T1);
+}
+
+void OPPROTO op_lduw_T0_T1(void)
+{
+ T0 = lduw((void *)T1);
+}
+
+void OPPROTO op_ldsw_T0_T1(void)
+{
+ T0 = ldsw((void *)T1);
+}
+
+void OPPROTO op_ldl_T0_T1(void)
+{
+ T0 = ldl((void *)T1);
+}
+
+void OPPROTO op_stb_T0_T1(void)
+{
+ stb((void *)T1, T0);
+}
+
+void OPPROTO op_stw_T0_T1(void)
+{
+ stw((void *)T1, T0);
+}
+
+void OPPROTO op_stl_T0_T1(void)
+{
+ stl((void *)T1, T0);
+}
+
+void OPPROTO op_swpb_T0_T1(void)
+{
+ int tmp;
+
+ cpu_lock();
+ tmp = ldub((void *)T1);
+ stb((void *)T1, T0);
+ T0 = tmp;
+ cpu_unlock();
+}
+
+void OPPROTO op_swpl_T0_T1(void)
+{
+ int tmp;
+
+ cpu_lock();
+ tmp = ldl((void *)T1);
+ stl((void *)T1, T0);
+ T0 = tmp;
+ cpu_unlock();
+}
+
+/* shifts */
+
+/* T1 based */
+void OPPROTO op_shll_T1_im(void)
+{
+ T1 = T1 << PARAM1;
+}
+
+void OPPROTO op_shrl_T1_im(void)
+{
+ T1 = (uint32_t)T1 >> PARAM1;
+}
+
+void OPPROTO op_sarl_T1_im(void)
+{
+ T1 = (int32_t)T1 >> PARAM1;
+}
+
+void OPPROTO op_rorl_T1_im(void)
+{
+ int shift;
+ shift = PARAM1;
+ T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
+}
+
+/* T1 based, set C flag */
+void OPPROTO op_shll_T1_im_cc(void)
+{
+ env->CF = (T1 >> (32 - PARAM1)) & 1;
+ T1 = T1 << PARAM1;
+}
+
+void OPPROTO op_shrl_T1_im_cc(void)
+{
+ env->CF = (T1 >> (PARAM1 - 1)) & 1;
+ T1 = (uint32_t)T1 >> PARAM1;
+}
+
+void OPPROTO op_sarl_T1_im_cc(void)
+{
+ env->CF = (T1 >> (PARAM1 - 1)) & 1;
+ T1 = (int32_t)T1 >> PARAM1;
+}
+
+void OPPROTO op_rorl_T1_im_cc(void)
+{
+ int shift;
+ shift = PARAM1;
+ env->CF = (T1 >> (shift - 1)) & 1;
+ T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
+}
+
+/* T2 based */
+void OPPROTO op_shll_T2_im(void)
+{
+ T2 = T2 << PARAM1;
+}
+
+void OPPROTO op_shrl_T2_im(void)
+{
+ T2 = (uint32_t)T2 >> PARAM1;
+}
+
+void OPPROTO op_sarl_T2_im(void)
+{
+ T2 = (int32_t)T2 >> PARAM1;
+}
+
+void OPPROTO op_rorl_T2_im(void)
+{
+ int shift;
+ shift = PARAM1;
+ T2 = ((uint32_t)T2 >> shift) | (T2 << (32 - shift));
+}
+
+/* T1 based, use T0 as shift count */
+
+void OPPROTO op_shll_T1_T0(void)
+{
+ int shift;
+ shift = T0 & 0xff;
+ if (shift >= 32)
+ T1 = 0;
+ else
+ T1 = T1 << shift;
+ FORCE_RET();
+}
+
+void OPPROTO op_shrl_T1_T0(void)
+{
+ int shift;
+ shift = T0 & 0xff;
+ if (shift >= 32)
+ T1 = 0;
+ else
+ T1 = (uint32_t)T1 >> shift;
+ FORCE_RET();
+}
+
+void OPPROTO op_sarl_T1_T0(void)
+{
+ int shift;
+ shift = T0 & 0xff;
+ if (shift >= 32)
+ shift = 31;
+ T1 = (int32_t)T1 >> shift;
+}
+
+void OPPROTO op_rorl_T1_T0(void)
+{
+ int shift;
+ shift = T0 & 0x1f;
+ if (shift) {
+ T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
+ }
+ FORCE_RET();
+}
+
+/* T1 based, use T0 as shift count and compute CF */
+
+void OPPROTO op_shll_T1_T0_cc(void)
+{
+ int shift;
+ shift = T0 & 0xff;
+ if (shift >= 32) {
+ if (shift == 32)
+ env->CF = T1 & 1;
+ else
+ env->CF = 0;
+ T1 = 0;
+ } else if (shift != 0) {
+ env->CF = (T1 >> (32 - shift)) & 1;
+ T1 = T1 << shift;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO op_shrl_T1_T0_cc(void)
+{
+ int shift;
+ shift = T0 & 0xff;
+ if (shift >= 32) {
+ if (shift == 32)
+ env->CF = (T1 >> 31) & 1;
+ else
+ env->CF = 0;
+ T1 = 0;
+ } else if (shift != 0) {
+ env->CF = (T1 >> (shift - 1)) & 1;
+ T1 = (uint32_t)T1 >> shift;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO op_sarl_T1_T0_cc(void)
+{
+ int shift;
+ shift = T0 & 0xff;
+ if (shift >= 32) {
+ env->CF = (T1 >> 31) & 1;
+ T1 = (int32_t)T1 >> 31;
+ } else {
+ env->CF = (T1 >> (shift - 1)) & 1;
+ T1 = (int32_t)T1 >> shift;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO op_rorl_T1_T0_cc(void)
+{
+ int shift1, shift;
+ shift1 = T0 & 0xff;
+ shift = shift1 & 0x1f;
+ if (shift == 0) {
+ if (shift1 != 0)
+ env->CF = (T1 >> 31) & 1;
+ } else {
+ env->CF = (T1 >> (shift - 1)) & 1;
+ T1 = ((uint32_t)T1 >> shift) | (T1 << (32 - shift));
+ }
+ FORCE_RET();
+}
+
+/* exceptions */
+
+void OPPROTO op_swi(void)
+{
+ env->exception_index = EXCP_SWI;
+ cpu_loop_exit();
+}
+
+void OPPROTO op_undef_insn(void)
+{
+ env->exception_index = EXCP_UDEF;
+ cpu_loop_exit();
+}
+
+/* thread support */
+
+spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+
+void cpu_lock(void)
+{
+ spin_lock(&global_cpu_lock);
+}
+
+void cpu_unlock(void)
+{
+ spin_unlock(&global_cpu_lock);
+}
+
--- /dev/null
+/*
+ * ARM micro operations (templates for various register related
+ * operations)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+void OPPROTO glue(op_movl_T0_, REGNAME)(void)
+{
+ T0 = REG;
+}
+
+void OPPROTO glue(op_movl_T1_, REGNAME)(void)
+{
+ T1 = REG;
+}
+
+void OPPROTO glue(op_movl_T2_, REGNAME)(void)
+{
+ T2 = REG;
+}
+
+void OPPROTO glue(glue(op_movl_, REGNAME), _T0)(void)
+{
+ REG = T0;
+}
+
+void OPPROTO glue(glue(op_movl_, REGNAME), _T1)(void)
+{
+ REG = T1;
+}
+
+#undef REG
+#undef REGNAME
--- /dev/null
+/*
+ * ARM translation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+
+/* internal defines */
+typedef struct DisasContext {
+ uint8_t *pc;
+ int is_jmp;
+ struct TranslationBlock *tb;
+} DisasContext;
+
+#define DISAS_JUMP_NEXT 4
+
+/* XXX: move that elsewhere */
+static uint16_t *gen_opc_ptr;
+static uint32_t *gen_opparam_ptr;
+extern FILE *logfile;
+extern int loglevel;
+
+enum {
+#define DEF(s, n, copy_size) INDEX_op_ ## s,
+#include "opc.h"
+#undef DEF
+ NB_OPS,
+};
+
+#include "gen-op.h"
+
+typedef void (GenOpFunc)(void);
+typedef void (GenOpFunc1)(long);
+typedef void (GenOpFunc2)(long, long);
+typedef void (GenOpFunc3)(long, long, long);
+
+static GenOpFunc2 *gen_test_cc[14] = {
+ gen_op_test_eq,
+ gen_op_test_ne,
+ gen_op_test_cs,
+ gen_op_test_cc,
+ gen_op_test_mi,
+ gen_op_test_pl,
+ gen_op_test_vs,
+ gen_op_test_vc,
+ gen_op_test_hi,
+ gen_op_test_ls,
+ gen_op_test_ge,
+ gen_op_test_lt,
+ gen_op_test_gt,
+ gen_op_test_le,
+};
+
+const uint8_t table_logic_cc[16] = {
+ 1, /* and */
+ 1, /* xor */
+ 0, /* sub */
+ 0, /* rsb */
+ 0, /* add */
+ 0, /* adc */
+ 0, /* sbc */
+ 0, /* rsc */
+ 1, /* andl */
+ 1, /* xorl */
+ 0, /* cmp */
+ 0, /* cmn */
+ 1, /* orr */
+ 1, /* mov */
+ 1, /* bic */
+ 1, /* mvn */
+};
+
+static GenOpFunc1 *gen_shift_T1_im[4] = {
+ gen_op_shll_T1_im,
+ gen_op_shrl_T1_im,
+ gen_op_sarl_T1_im,
+ gen_op_rorl_T1_im,
+};
+
+static GenOpFunc1 *gen_shift_T2_im[4] = {
+ gen_op_shll_T2_im,
+ gen_op_shrl_T2_im,
+ gen_op_sarl_T2_im,
+ gen_op_rorl_T2_im,
+};
+
+static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
+ gen_op_shll_T1_im_cc,
+ gen_op_shrl_T1_im_cc,
+ gen_op_sarl_T1_im_cc,
+ gen_op_rorl_T1_im_cc,
+};
+
+static GenOpFunc *gen_shift_T1_T0[4] = {
+ gen_op_shll_T1_T0,
+ gen_op_shrl_T1_T0,
+ gen_op_sarl_T1_T0,
+ gen_op_rorl_T1_T0,
+};
+
+static GenOpFunc *gen_shift_T1_T0_cc[4] = {
+ gen_op_shll_T1_T0_cc,
+ gen_op_shrl_T1_T0_cc,
+ gen_op_sarl_T1_T0_cc,
+ gen_op_rorl_T1_T0_cc,
+};
+
+static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
+ {
+ gen_op_movl_T0_r0,
+ gen_op_movl_T0_r1,
+ gen_op_movl_T0_r2,
+ gen_op_movl_T0_r3,
+ gen_op_movl_T0_r4,
+ gen_op_movl_T0_r5,
+ gen_op_movl_T0_r6,
+ gen_op_movl_T0_r7,
+ gen_op_movl_T0_r8,
+ gen_op_movl_T0_r9,
+ gen_op_movl_T0_r10,
+ gen_op_movl_T0_r11,
+ gen_op_movl_T0_r12,
+ gen_op_movl_T0_r13,
+ gen_op_movl_T0_r14,
+ gen_op_movl_T0_r15,
+ },
+ {
+ gen_op_movl_T1_r0,
+ gen_op_movl_T1_r1,
+ gen_op_movl_T1_r2,
+ gen_op_movl_T1_r3,
+ gen_op_movl_T1_r4,
+ gen_op_movl_T1_r5,
+ gen_op_movl_T1_r6,
+ gen_op_movl_T1_r7,
+ gen_op_movl_T1_r8,
+ gen_op_movl_T1_r9,
+ gen_op_movl_T1_r10,
+ gen_op_movl_T1_r11,
+ gen_op_movl_T1_r12,
+ gen_op_movl_T1_r13,
+ gen_op_movl_T1_r14,
+ gen_op_movl_T1_r15,
+ },
+ {
+ gen_op_movl_T2_r0,
+ gen_op_movl_T2_r1,
+ gen_op_movl_T2_r2,
+ gen_op_movl_T2_r3,
+ gen_op_movl_T2_r4,
+ gen_op_movl_T2_r5,
+ gen_op_movl_T2_r6,
+ gen_op_movl_T2_r7,
+ gen_op_movl_T2_r8,
+ gen_op_movl_T2_r9,
+ gen_op_movl_T2_r10,
+ gen_op_movl_T2_r11,
+ gen_op_movl_T2_r12,
+ gen_op_movl_T2_r13,
+ gen_op_movl_T2_r14,
+ gen_op_movl_T2_r15,
+ },
+};
+
+static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
+ {
+ gen_op_movl_r0_T0,
+ gen_op_movl_r1_T0,
+ gen_op_movl_r2_T0,
+ gen_op_movl_r3_T0,
+ gen_op_movl_r4_T0,
+ gen_op_movl_r5_T0,
+ gen_op_movl_r6_T0,
+ gen_op_movl_r7_T0,
+ gen_op_movl_r8_T0,
+ gen_op_movl_r9_T0,
+ gen_op_movl_r10_T0,
+ gen_op_movl_r11_T0,
+ gen_op_movl_r12_T0,
+ gen_op_movl_r13_T0,
+ gen_op_movl_r14_T0,
+ gen_op_movl_r15_T0,
+ },
+ {
+ gen_op_movl_r0_T1,
+ gen_op_movl_r1_T1,
+ gen_op_movl_r2_T1,
+ gen_op_movl_r3_T1,
+ gen_op_movl_r4_T1,
+ gen_op_movl_r5_T1,
+ gen_op_movl_r6_T1,
+ gen_op_movl_r7_T1,
+ gen_op_movl_r8_T1,
+ gen_op_movl_r9_T1,
+ gen_op_movl_r10_T1,
+ gen_op_movl_r11_T1,
+ gen_op_movl_r12_T1,
+ gen_op_movl_r13_T1,
+ gen_op_movl_r14_T1,
+ gen_op_movl_r15_T1,
+ },
+};
+
+static GenOpFunc1 *gen_op_movl_TN_im[3] = {
+ gen_op_movl_T0_im,
+ gen_op_movl_T1_im,
+ gen_op_movl_T2_im,
+};
+
+static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
+{
+ int val;
+
+ if (reg == 15) {
+ /* normally, since we have already updated PC, we only need to add 4 */
+ val = (long)s->pc + 4;
+ gen_op_movl_TN_im[t](val);
+ } else {
+ gen_op_movl_TN_reg[t][reg]();
+ }
+}
+
+static inline void gen_movl_T0_reg(DisasContext *s, int reg)
+{
+ gen_movl_TN_reg(s, reg, 0);
+}
+
+static inline void gen_movl_T1_reg(DisasContext *s, int reg)
+{
+ gen_movl_TN_reg(s, reg, 1);
+}
+
+static inline void gen_movl_T2_reg(DisasContext *s, int reg)
+{
+ gen_movl_TN_reg(s, reg, 2);
+}
+
+static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
+{
+ gen_op_movl_reg_TN[t][reg]();
+ if (reg == 15) {
+ s->is_jmp = DISAS_JUMP;
+ }
+}
+
+static inline void gen_movl_reg_T0(DisasContext *s, int reg)
+{
+ gen_movl_reg_TN(s, reg, 0);
+}
+
+static inline void gen_movl_reg_T1(DisasContext *s, int reg)
+{
+ gen_movl_reg_TN(s, reg, 1);
+}
+
+static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
+{
+ int val, rm, shift;
+
+ if (!(insn & (1 << 25))) {
+ /* immediate */
+ val = insn & 0xfff;
+ if (!(insn & (1 << 23)))
+ val = -val;
+ gen_op_addl_T1_im(val);
+ } else {
+ /* shift/register */
+ rm = (insn) & 0xf;
+ shift = (insn >> 7) & 0x1f;
+ gen_movl_T2_reg(s, rm);
+ if (shift != 0) {
+ gen_shift_T2_im[(insn >> 5) & 3](shift);
+ }
+ if (!(insn & (1 << 23)))
+ gen_op_subl_T1_T2();
+ else
+ gen_op_addl_T1_T2();
+ }
+}
+
+static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
+{
+ int val, rm;
+
+ if (insn & (1 << 22)) {
+ /* immediate */
+ val = (insn & 0xf) | ((insn >> 4) & 0xf0);
+ if (!(insn & (1 << 23)))
+ val = -val;
+ gen_op_addl_T1_im(val);
+ } else {
+ /* register */
+ rm = (insn) & 0xf;
+ gen_movl_T2_reg(s, rm);
+ if (!(insn & (1 << 23)))
+ gen_op_subl_T1_T2();
+ else
+ gen_op_addl_T1_T2();
+ }
+}
+
+static void disas_arm_insn(DisasContext *s)
+{
+ unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
+
+ insn = ldl(s->pc);
+ s->pc += 4;
+
+ cond = insn >> 28;
+ if (cond == 0xf)
+ goto illegal_op;
+ if (cond != 0xe) {
+ /* if the condition is not "always", generate a conditional jump to
+ the next instruction */
+ gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
+ s->is_jmp = DISAS_JUMP_NEXT;
+ }
+ if (((insn & 0x0e000000) == 0 &&
+ (insn & 0x00000090) != 0x90) ||
+ ((insn & 0x0e000000) == (1 << 25))) {
+ int set_cc, logic_cc, shiftop;
+
+ op1 = (insn >> 21) & 0xf;
+ set_cc = (insn >> 20) & 1;
+ logic_cc = table_logic_cc[op1] & set_cc;
+
+ /* data processing instruction */
+ if (insn & (1 << 25)) {
+ /* immediate operand */
+ val = insn & 0xff;
+ shift = ((insn >> 8) & 0xf) * 2;
+ if (shift)
+ val = (val >> shift) | (val << (32 - shift));
+ gen_op_movl_T1_im(val);
+ /* XXX: is CF modified ? */
+ } else {
+ /* register */
+ rm = (insn) & 0xf;
+ gen_movl_T1_reg(s, rm);
+ shiftop = (insn >> 5) & 3;
+ if (!(insn & (1 << 4))) {
+ shift = (insn >> 7) & 0x1f;
+ if (shift != 0) {
+ if (logic_cc) {
+ gen_shift_T1_im_cc[shiftop](shift);
+ } else {
+ gen_shift_T1_im[shiftop](shift);
+ }
+ }
+ } else {
+ rs = (insn >> 8) & 0xf;
+ gen_movl_T0_reg(s, rs);
+ if (logic_cc) {
+ gen_shift_T1_T0_cc[shiftop]();
+ } else {
+ gen_shift_T1_T0[shiftop]();
+ }
+ }
+ }
+ if (op1 != 0x0f && op1 != 0x0d) {
+ rn = (insn >> 16) & 0xf;
+ gen_movl_T0_reg(s, rn);
+ }
+ rd = (insn >> 12) & 0xf;
+ switch(op1) {
+ case 0x00:
+ gen_op_andl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ if (logic_cc)
+ gen_op_logic_T0_cc();
+ break;
+ case 0x01:
+ gen_op_xorl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ if (logic_cc)
+ gen_op_logic_T0_cc();
+ break;
+ case 0x02:
+ if (set_cc)
+ gen_op_subl_T0_T1_cc();
+ else
+ gen_op_subl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x03:
+ if (set_cc)
+ gen_op_rsbl_T0_T1_cc();
+ else
+ gen_op_rsbl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x04:
+ if (set_cc)
+ gen_op_addl_T0_T1_cc();
+ else
+ gen_op_addl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x05:
+ if (set_cc)
+ gen_op_adcl_T0_T1_cc();
+ else
+ gen_op_adcl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x06:
+ if (set_cc)
+ gen_op_sbcl_T0_T1_cc();
+ else
+ gen_op_sbcl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x07:
+ if (set_cc)
+ gen_op_rscl_T0_T1_cc();
+ else
+ gen_op_rscl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ break;
+ case 0x08:
+ if (set_cc) {
+ gen_op_andl_T0_T1();
+ gen_op_logic_T0_cc();
+ }
+ break;
+ case 0x09:
+ if (set_cc) {
+ gen_op_xorl_T0_T1();
+ gen_op_logic_T0_cc();
+ }
+ break;
+ case 0x0a:
+ if (set_cc) {
+ gen_op_subl_T0_T1_cc();
+ }
+ break;
+ case 0x0b:
+ if (set_cc) {
+ gen_op_addl_T0_T1_cc();
+ }
+ break;
+ case 0x0c:
+ gen_op_orl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ if (logic_cc)
+ gen_op_logic_T0_cc();
+ break;
+ case 0x0d:
+ gen_movl_reg_T1(s, rd);
+ if (logic_cc)
+ gen_op_logic_T1_cc();
+ break;
+ case 0x0e:
+ gen_op_bicl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ if (logic_cc)
+ gen_op_logic_T0_cc();
+ break;
+ default:
+ case 0x0f:
+ gen_op_notl_T1();
+ gen_movl_reg_T1(s, rd);
+ if (logic_cc)
+ gen_op_logic_T1_cc();
+ break;
+ }
+ } else {
+ /* other instructions */
+ op1 = (insn >> 24) & 0xf;
+ switch(op1) {
+ case 0x0:
+ case 0x1:
+ sh = (insn >> 5) & 3;
+ if (sh == 0) {
+ if (op1 == 0x0) {
+ rd = (insn >> 16) & 0xf;
+ rn = (insn >> 12) & 0xf;
+ rs = (insn >> 8) & 0xf;
+ rm = (insn) & 0xf;
+ if (!(insn & (1 << 23))) {
+ /* 32 bit mul */
+ gen_movl_T0_reg(s, rs);
+ gen_movl_T1_reg(s, rm);
+ gen_op_mul_T0_T1();
+ if (insn & (1 << 21)) {
+ gen_movl_T1_reg(s, rn);
+ gen_op_addl_T0_T1();
+ }
+ if (insn & (1 << 20))
+ gen_op_logic_T0_cc();
+ gen_movl_reg_T0(s, rd);
+ } else {
+ /* 64 bit mul */
+ gen_movl_T0_reg(s, rs);
+ gen_movl_T1_reg(s, rm);
+ if (insn & (1 << 22))
+ gen_op_mull_T0_T1();
+ else
+ gen_op_imull_T0_T1();
+ if (insn & (1 << 21))
+ gen_op_addq_T0_T1(rn, rd);
+ if (insn & (1 << 20))
+ gen_op_logicq_cc();
+ gen_movl_reg_T0(s, rn);
+ gen_movl_reg_T1(s, rd);
+ }
+ } else {
+ /* SWP instruction */
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ rm = (insn) & 0xf;
+
+ gen_movl_T0_reg(s, rm);
+ gen_movl_T1_reg(s, rn);
+ if (insn & (1 << 22)) {
+ gen_op_swpb_T0_T1();
+ } else {
+ gen_op_swpl_T0_T1();
+ }
+ gen_movl_reg_T0(s, rd);
+ }
+ } else {
+ /* load/store half word */
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ gen_movl_T1_reg(s, rn);
+ if (insn & (1 << 25))
+ gen_add_datah_offset(s, insn);
+ if (insn & (1 << 20)) {
+ /* load */
+ switch(sh) {
+ case 1:
+ gen_op_lduw_T0_T1();
+ break;
+ case 2:
+ gen_op_ldsb_T0_T1();
+ break;
+ default:
+ case 3:
+ gen_op_ldsw_T0_T1();
+ break;
+ }
+ } else {
+ /* store */
+ gen_op_stw_T0_T1();
+ }
+ if (!(insn & (1 << 24))) {
+ gen_add_datah_offset(s, insn);
+ gen_movl_reg_T1(s, rn);
+ } else if (insn & (1 << 21)) {
+ gen_movl_reg_T1(s, rn);
+ }
+ }
+ break;
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7:
+ /* load/store byte/word */
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ gen_movl_T1_reg(s, rn);
+ if (insn & (1 << 24))
+ gen_add_data_offset(s, insn);
+ if (insn & (1 << 20)) {
+ /* load */
+ if (insn & (1 << 22))
+ gen_op_ldub_T0_T1();
+ else
+ gen_op_ldl_T0_T1();
+ gen_movl_reg_T0(s, rd);
+ } else {
+ /* store */
+ gen_movl_T0_reg(s, rd);
+ if (insn & (1 << 22))
+ gen_op_stb_T0_T1();
+ else
+ gen_op_stl_T0_T1();
+ }
+ if (!(insn & (1 << 24))) {
+ gen_add_data_offset(s, insn);
+ gen_movl_reg_T1(s, rn);
+ } else if (insn & (1 << 21)) {
+ gen_movl_reg_T1(s, rn);
+ }
+ break;
+ case 0x08:
+ case 0x09:
+ {
+ int j, n;
+ /* load/store multiple words */
+ /* XXX: store correct base if write back */
+ if (insn & (1 << 22))
+ goto illegal_op; /* only usable in supervisor mode */
+ rn = (insn >> 16) & 0xf;
+ gen_movl_T1_reg(s, rn);
+
+ /* compute total size */
+ n = 0;
+ for(i=0;i<16;i++) {
+ if (insn & (1 << i))
+ n++;
+ }
+ /* XXX: test invalid n == 0 case ? */
+ if (insn & (1 << 23)) {
+ if (insn & (1 << 24)) {
+ /* pre increment */
+ gen_op_addl_T1_im(4);
+ } else {
+ /* post increment */
+ }
+ } else {
+ if (insn & (1 << 24)) {
+ /* pre decrement */
+ gen_op_addl_T1_im(-(n * 4));
+ } else {
+ /* post decrement */
+ if (n != 1)
+ gen_op_addl_T1_im(-((n - 1) * 4));
+ }
+ }
+ j = 0;
+ for(i=0;i<16;i++) {
+ if (insn & (1 << i)) {
+ if (insn & (1 << 20)) {
+ /* load */
+ gen_op_ldl_T0_T1();
+ gen_movl_reg_T0(s, i);
+ } else {
+ /* store */
+ if (i == 15) {
+ /* special case: r15 = PC + 12 */
+ val = (long)s->pc + 8;
+ gen_op_movl_TN_im[0](val);
+ } else {
+ gen_movl_T0_reg(s, i);
+ }
+ gen_op_stl_T0_T1();
+ }
+ j++;
+ /* no need to add after the last transfer */
+ if (j != n)
+ gen_op_addl_T1_im(4);
+ }
+ }
+ if (insn & (1 << 21)) {
+ /* write back */
+ if (insn & (1 << 23)) {
+ if (insn & (1 << 24)) {
+ /* pre increment */
+ } else {
+ /* post increment */
+ gen_op_addl_T1_im(4);
+ }
+ } else {
+ if (insn & (1 << 24)) {
+ /* pre decrement */
+ if (n != 1)
+ gen_op_addl_T1_im(-((n - 1) * 4));
+ } else {
+ /* post decrement */
+ gen_op_addl_T1_im(-(n * 4));
+ }
+ }
+ gen_movl_reg_T1(s, rn);
+ }
+ }
+ break;
+ case 0xa:
+ case 0xb:
+ {
+ int offset;
+
+ /* branch (and link) */
+ val = (int)s->pc;
+ if (insn & (1 << 24)) {
+ gen_op_movl_T0_im(val);
+ gen_op_movl_reg_TN[0][14]();
+ }
+ offset = (((int)insn << 8) >> 8);
+ val += (offset << 2) + 4;
+ gen_op_jmp((long)s->tb, val);
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+ break;
+ case 0xf:
+ /* swi */
+ gen_op_movl_T0_im((long)s->pc);
+ gen_op_movl_reg_TN[0][15]();
+ gen_op_swi();
+ s->is_jmp = DISAS_JUMP;
+ break;
+ case 0xc:
+ case 0xd:
+ rd = (insn >> 12) & 0xf;
+ rn = (insn >> 16) & 0xf;
+ gen_movl_T1_reg(s, rn);
+ val = (insn) & 0xff;
+ if (!(insn & (1 << 23)))
+ val = -val;
+ switch((insn >> 8) & 0xf) {
+ case 0x1:
+ /* load/store */
+ if ((insn & (1 << 24)))
+ gen_op_addl_T1_im(val);
+ /* XXX: do it */
+ if (!(insn & (1 << 24)))
+ gen_op_addl_T1_im(val);
+ if (insn & (1 << 21))
+ gen_movl_reg_T1(s, rn);
+ break;
+ case 0x2:
+ {
+ int n, i;
+ /* load store multiple */
+ if ((insn & (1 << 24)))
+ gen_op_addl_T1_im(val);
+ switch(insn & 0x00408000) {
+ case 0x00008000: n = 1; break;
+ case 0x00400000: n = 2; break;
+ case 0x00408000: n = 3; break;
+ default: n = 4; break;
+ }
+ for(i = 0;i < n; i++) {
+ /* XXX: do it */
+ }
+ if (!(insn & (1 << 24)))
+ gen_op_addl_T1_im(val);
+ if (insn & (1 << 21))
+ gen_movl_reg_T1(s, rn);
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x0e:
+ /* float ops */
+ /* XXX: do it */
+ switch((insn >> 20) & 0xf) {
+ case 0x2: /* wfs */
+ break;
+ case 0x3: /* rfs */
+ break;
+ case 0x4: /* wfc */
+ break;
+ case 0x5: /* rfc */
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ default:
+ illegal_op:
+ gen_op_movl_T0_im((long)s->pc - 4);
+ gen_op_movl_reg_TN[0][15]();
+ gen_op_undef_insn();
+ s->is_jmp = DISAS_JUMP;
+ break;
+ }
+ }
+}
+
+/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
+ basic block 'tb'. If search_pc is TRUE, also generate PC
+ information for each intermediate instruction. */
+static inline int gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
+{
+ DisasContext dc1, *dc = &dc1;
+ uint16_t *gen_opc_end;
+ int j, lj;
+ uint8_t *pc_start;
+
+ /* generate intermediate code */
+ pc_start = (uint8_t *)tb->pc;
+
+ dc->tb = tb;
+
+ gen_opc_ptr = gen_opc_buf;
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ gen_opparam_ptr = gen_opparam_buf;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ lj = -1;
+ do {
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j)
+ gen_opc_instr_start[lj++] = 0;
+ }
+ gen_opc_pc[lj] = (uint32_t)dc->pc;
+ gen_opc_instr_start[lj] = 1;
+ }
+ disas_arm_insn(dc);
+ } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
+ (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
+ switch(dc->is_jmp) {
+ case DISAS_JUMP_NEXT:
+ case DISAS_NEXT:
+ gen_op_jmp((long)dc->tb, (long)dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ /* indicate that the hash table must be used to find the next TB */
+ gen_op_movl_T0_0();
+ gen_op_exit_tb();
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+ *gen_opc_ptr = INDEX_op_end;
+
+#ifdef DEBUG_DISAS
+ if (loglevel) {
+ fprintf(logfile, "----------------\n");
+ fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
+ disas(logfile, pc_start, dc->pc - pc_start, 0, 0);
+ fprintf(logfile, "\n");
+
+ fprintf(logfile, "OP:\n");
+ dump_ops(gen_opc_buf, gen_opparam_buf);
+ fprintf(logfile, "\n");
+ }
+#endif
+ if (!search_pc)
+ tb->size = dc->pc - pc_start;
+ return 0;
+}
+
+int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(env, tb, 0);
+}
+
+int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(env, tb, 1);
+}
+
+CPUARMState *cpu_arm_init(void)
+{
+ CPUARMState *env;
+
+ cpu_exec_init();
+
+ env = malloc(sizeof(CPUARMState));
+ if (!env)
+ return NULL;
+ memset(env, 0, sizeof(CPUARMState));
+ return env;
+}
+
+void cpu_arm_close(CPUARMState *env)
+{
+ free(env);
+}
+
+void cpu_arm_dump_state(CPUARMState *env, FILE *f, int flags)
+{
+ int i;
+
+ for(i=0;i<16;i++) {
+ fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3)
+ fprintf(f, "\n");
+ else
+ fprintf(f, " ");
+ }
+ fprintf(f, "PSR=%08x %c%c%c%c\n",
+ env->cpsr,
+ env->cpsr & (1 << 31) ? 'N' : '-',
+ env->cpsr & (1 << 30) ? 'Z' : '-',
+ env->cpsr & (1 << 29) ? 'C' : '-',
+ env->cpsr & (1 << 28) ? 'V' : '-');
+}
--- /dev/null
+/*
+ * i386 virtual CPU header
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef CPU_I386_H
+#define CPU_I386_H
+
+#include "cpu-defs.h"
+
+#define R_EAX 0
+#define R_ECX 1
+#define R_EDX 2
+#define R_EBX 3
+#define R_ESP 4
+#define R_EBP 5
+#define R_ESI 6
+#define R_EDI 7
+
+#define R_AL 0
+#define R_CL 1
+#define R_DL 2
+#define R_BL 3
+#define R_AH 4
+#define R_CH 5
+#define R_DH 6
+#define R_BH 7
+
+#define R_ES 0
+#define R_CS 1
+#define R_SS 2
+#define R_DS 3
+#define R_FS 4
+#define R_GS 5
+
+/* segment descriptor fields */
+#define DESC_G_MASK (1 << 23)
+#define DESC_B_SHIFT 22
+#define DESC_B_MASK (1 << DESC_B_SHIFT)
+#define DESC_AVL_MASK (1 << 20)
+#define DESC_P_MASK (1 << 15)
+#define DESC_DPL_SHIFT 13
+#define DESC_S_MASK (1 << 12)
+#define DESC_TYPE_SHIFT 8
+#define DESC_A_MASK (1 << 8)
+
+#define DESC_CS_MASK (1 << 11)
+#define DESC_C_MASK (1 << 10)
+#define DESC_R_MASK (1 << 9)
+
+#define DESC_E_MASK (1 << 10)
+#define DESC_W_MASK (1 << 9)
+
+/* eflags masks */
+#define CC_C 0x0001
+#define CC_P 0x0004
+#define CC_A 0x0010
+#define CC_Z 0x0040
+#define CC_S 0x0080
+#define CC_O 0x0800
+
+#define TF_SHIFT 8
+#define IOPL_SHIFT 12
+#define VM_SHIFT 17
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define DF_MASK 0x00000400
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define RF_MASK 0x00010000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000
+#define VIP_MASK 0x00100000
+#define ID_MASK 0x00200000
+
+/* hidden flags - used internally by qemu to represent additional cpu
+ states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
+ using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease ORing
+ with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT 0
+/* true if soft mmu is being used */
+#define HF_SOFTMMU_SHIFT 2
+/* true if hardware interrupts must be disabled for next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 bit segments */
+#define HF_CS32_SHIFT 4
+#define HF_SS32_SHIFT 5
+/* set when the base of DS, ES or SS is non zero and must be added */
+#define HF_ADDSEG_SHIFT 6
+
+#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
+#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
+#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
+
+#define CR0_PE_MASK (1 << 0)
+#define CR0_TS_MASK (1 << 3)
+#define CR0_WP_MASK (1 << 16)
+#define CR0_AM_MASK (1 << 18)
+#define CR0_PG_MASK (1 << 31)
+
+#define CR4_VME_MASK (1 << 0)
+#define CR4_PVI_MASK (1 << 1)
+#define CR4_TSD_MASK (1 << 2)
+#define CR4_DE_MASK (1 << 3)
+#define CR4_PSE_MASK (1 << 4)
+
+#define PG_PRESENT_BIT 0
+#define PG_RW_BIT 1
+#define PG_USER_BIT 2
+#define PG_PWT_BIT 3
+#define PG_PCD_BIT 4
+#define PG_ACCESSED_BIT 5
+#define PG_DIRTY_BIT 6
+#define PG_PSE_BIT 7
+#define PG_GLOBAL_BIT 8
+
+#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
+#define PG_RW_MASK (1 << PG_RW_BIT)
+#define PG_USER_MASK (1 << PG_USER_BIT)
+#define PG_PWT_MASK (1 << PG_PWT_BIT)
+#define PG_PCD_MASK (1 << PG_PCD_BIT)
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
+#define PG_PSE_MASK (1 << PG_PSE_BIT)
+#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
+
+#define PG_ERROR_W_BIT 1
+
+#define PG_ERROR_P_MASK 0x01
+#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
+#define PG_ERROR_U_MASK 0x04
+#define PG_ERROR_RSVD_MASK 0x08
+
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_SYSENTER_CS 0x174
+#define MSR_IA32_SYSENTER_ESP 0x175
+#define MSR_IA32_SYSENTER_EIP 0x176
+
+#define EXCP00_DIVZ 0
+#define EXCP01_SSTP 1
+#define EXCP02_NMI 2
+#define EXCP03_INT3 3
+#define EXCP04_INTO 4
+#define EXCP05_BOUND 5
+#define EXCP06_ILLOP 6
+#define EXCP07_PREX 7
+#define EXCP08_DBLE 8
+#define EXCP09_XERR 9
+#define EXCP0A_TSS 10
+#define EXCP0B_NOSEG 11
+#define EXCP0C_STACK 12
+#define EXCP0D_GPF 13
+#define EXCP0E_PAGE 14
+#define EXCP10_COPR 16
+#define EXCP11_ALGN 17
+#define EXCP12_MCHK 18
+
+enum {
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+ CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
+ CC_OP_MUL, /* modify all flags, C, O = (CC_SRC != 0) */
+
+ CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADDW,
+ CC_OP_ADDL,
+
+ CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADCW,
+ CC_OP_ADCL,
+
+ CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUBW,
+ CC_OP_SUBL,
+
+ CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SBBW,
+ CC_OP_SBBL,
+
+ CC_OP_LOGICB, /* modify all flags, CC_DST = res */
+ CC_OP_LOGICW,
+ CC_OP_LOGICL,
+
+ CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
+ CC_OP_INCW,
+ CC_OP_INCL,
+
+ CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
+ CC_OP_DECW,
+ CC_OP_DECL,
+
+ CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
+ CC_OP_SHLW,
+ CC_OP_SHLL,
+
+ CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
+ CC_OP_SARW,
+ CC_OP_SARL,
+
+ CC_OP_NB,
+};
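+
+/* For illustration only (a sketch, not used by the emulator): with
+ the lazy scheme above, a 'compute_all' routine for CC_OP_ADDL can
+ rebuild every flag from CC_SRC (the first operand) and CC_DST (the
+ result) alone. */
+#if 0
+static int sketch_compute_all_addl(uint32_t src1, uint32_t res)
+{
+ uint32_t src2 = res - src1; /* the second operand is implied */
+ int cf = res < src1; /* unsigned carry out */
+ int af = (res ^ src1 ^ src2) & CC_A; /* carry out of bit 3 */
+ int zf = (res == 0) << 6;
+ int sf = (res >> 24) & CC_S; /* copy of the sign bit */
+ int of = (((src1 ^ src2 ^ -1) & (src1 ^ res)) >> 20) & CC_O;
+ return cf | af | zf | sf | of; /* PF omitted for brevity */
+}
+#endif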
+
+#ifdef __i386__
+#define USE_X86LDOUBLE
+#endif
+
+#ifdef USE_X86LDOUBLE
+typedef long double CPU86_LDouble;
+#else
+typedef double CPU86_LDouble;
+#endif
+
+typedef struct SegmentCache {
+ uint32_t selector;
+ uint8_t *base;
+ uint32_t limit;
+ uint32_t flags;
+} SegmentCache;
+
+typedef struct CPUX86State {
+ /* standard registers */
+ uint32_t regs[8];
+ uint32_t eip;
+ uint32_t eflags; /* eflags register. During CPU emulation, CC
+ flags and DF are set to zero because they are
+ stored elsewhere */
+
+ /* emulator internal eflags handling */
+ uint32_t cc_src;
+ uint32_t cc_dst;
+ uint32_t cc_op;
+ int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+ uint32_t hflags; /* hidden flags, see HF_xxx constants */
+
+ /* FPU state */
+ unsigned int fpstt; /* top of stack index */
+ unsigned int fpus;
+ unsigned int fpuc;
+ uint8_t fptags[8]; /* 0 = valid, 1 = empty */
+ CPU86_LDouble fpregs[8];
+
+ /* emulator internal variables */
+ CPU86_LDouble ft0;
+ union {
+ float f;
+ double d;
+ int i32;
+ int64_t i64;
+ } fp_convert;
+
+ /* segments */
+ SegmentCache segs[6]; /* selector values */
+ SegmentCache ldt;
+ SegmentCache tr;
+ SegmentCache gdt; /* only base and limit are used */
+ SegmentCache idt; /* only base and limit are used */
+
+ /* sysenter registers */
+ uint32_t sysenter_cs;
+ uint32_t sysenter_esp;
+ uint32_t sysenter_eip;
+
+ /* exception/interrupt handling */
+ jmp_buf jmp_env;
+ int exception_index;
+ int error_code;
+ int exception_is_int;
+ int exception_next_eip;
+ struct TranslationBlock *current_tb; /* currently executing TB */
+ uint32_t cr[5]; /* NOTE: cr1 is unused */
+ uint32_t dr[8]; /* debug registers */
+ int interrupt_request;
+ int user_mode_only; /* user mode only simulation */
+
+ /* soft mmu support */
+ /* 0 = kernel, 1 = user */
+ CPUTLBEntry tlb_read[2][CPU_TLB_SIZE];
+ CPUTLBEntry tlb_write[2][CPU_TLB_SIZE];
+
+ /* ice debug support */
+ uint32_t breakpoints[MAX_BREAKPOINTS];
+ int nb_breakpoints;
+ int singlestep_enabled;
+
+ /* user data */
+ void *opaque;
+} CPUX86State;
+
+#ifndef IN_OP_I386
+void cpu_x86_outb(CPUX86State *env, int addr, int val);
+void cpu_x86_outw(CPUX86State *env, int addr, int val);
+void cpu_x86_outl(CPUX86State *env, int addr, int val);
+int cpu_x86_inb(CPUX86State *env, int addr);
+int cpu_x86_inw(CPUX86State *env, int addr);
+int cpu_x86_inl(CPUX86State *env, int addr);
+#endif
+
+CPUX86State *cpu_x86_init(void);
+int cpu_x86_exec(CPUX86State *s);
+void cpu_x86_close(CPUX86State *s);
+int cpu_x86_get_pic_interrupt(CPUX86State *s);
+
+/* this function must always be used to load data into the segment
+ cache: it synchronizes the hflags with the segment cache values */
+static inline void cpu_x86_load_seg_cache(CPUX86State *env,
+ int seg_reg, unsigned int selector,
+ uint8_t *base, unsigned int limit,
+ unsigned int flags)
+{
+ SegmentCache *sc;
+ unsigned int new_hflags;
+
+ sc = &env->segs[seg_reg];
+ sc->selector = selector;
+ sc->base = base;
+ sc->limit = limit;
+ sc->flags = flags;
+
+ /* update the hidden flags */
+ new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
+ >> (DESC_B_SHIFT - HF_CS32_SHIFT);
+ new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
+ >> (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ /* XXX: try to avoid this test. The problem comes from the
+ fact that in real mode or vm86 mode we only modify the
+ 'base' and 'selector' fields of the segment cache to go
+ faster. A solution may be to force addseg to one in
+ translate-i386.c. */
+ new_hflags |= HF_ADDSEG_MASK;
+ } else {
+ new_hflags |= (((unsigned long)env->segs[R_DS].base |
+ (unsigned long)env->segs[R_ES].base |
+ (unsigned long)env->segs[R_SS].base) != 0) <<
+ HF_ADDSEG_SHIFT;
+ }
+ env->hflags = (env->hflags &
+ ~(HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
+}
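+
+/* Usage sketch with assumed values (selector 0x08, flat 4 GB limit):
+ a guest loader installing a 32 bit flat code segment would go
+ through the cache loader above so that HF_CS32 ends up set:
+
+ cpu_x86_load_seg_cache(env, R_CS, 0x08, NULL, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
+ DESC_R_MASK | DESC_B_MASK | DESC_G_MASK);
+*/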
+
+/* wrapper, just in case memory mappings must be changed */
+static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
+{
+#if HF_CPL_MASK == 3
+ s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
+#else
+#error HF_CPL_MASK is hardcoded
+#endif
+}
+
+/* the following helpers are only usable in user mode simulation as
+ they can trigger unexpected exceptions */
+void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
+void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
+void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
+
+/* you can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. Non-zero
+ is returned if the signal was handled by the virtual CPU. */
+struct siginfo;
+int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
+ void *puc);
+
+/* MMU defines */
+void cpu_x86_init_mmu(CPUX86State *env);
+extern int phys_ram_size;
+extern int phys_ram_fd;
+extern uint8_t *phys_ram_base;
+
+/* used to debug */
+#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
+#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
+void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags);
+
+#define TARGET_PAGE_BITS 12
+#include "cpu-all.h"
+
+#endif /* CPU_I386_H */
--- /dev/null
+/*
+ * i386 execution defines
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "dyngen-exec.h"
+
+/* at least 4 register variables are defined */
+register struct CPUX86State *env asm(AREG0);
+register uint32_t T0 asm(AREG1);
+register uint32_t T1 asm(AREG2);
+register uint32_t T2 asm(AREG3);
+
+#define A0 T2
+
+/* if more registers are available, we define some registers too */
+#ifdef AREG4
+register uint32_t EAX asm(AREG4);
+#define reg_EAX
+#endif
+
+#ifdef AREG5
+register uint32_t ESP asm(AREG5);
+#define reg_ESP
+#endif
+
+#ifdef AREG6
+register uint32_t EBP asm(AREG6);
+#define reg_EBP
+#endif
+
+#ifdef AREG7
+register uint32_t ECX asm(AREG7);
+#define reg_ECX
+#endif
+
+#ifdef AREG8
+register uint32_t EDX asm(AREG8);
+#define reg_EDX
+#endif
+
+#ifdef AREG9
+register uint32_t EBX asm(AREG9);
+#define reg_EBX
+#endif
+
+#ifdef AREG10
+register uint32_t ESI asm(AREG10);
+#define reg_ESI
+#endif
+
+#ifdef AREG11
+register uint32_t EDI asm(AREG11);
+#define reg_EDI
+#endif
+
+extern FILE *logfile;
+extern int loglevel;
+
+#ifndef reg_EAX
+#define EAX (env->regs[R_EAX])
+#endif
+#ifndef reg_ECX
+#define ECX (env->regs[R_ECX])
+#endif
+#ifndef reg_EDX
+#define EDX (env->regs[R_EDX])
+#endif
+#ifndef reg_EBX
+#define EBX (env->regs[R_EBX])
+#endif
+#ifndef reg_ESP
+#define ESP (env->regs[R_ESP])
+#endif
+#ifndef reg_EBP
+#define EBP (env->regs[R_EBP])
+#endif
+#ifndef reg_ESI
+#define ESI (env->regs[R_ESI])
+#endif
+#ifndef reg_EDI
+#define EDI (env->regs[R_EDI])
+#endif
+#define EIP (env->eip)
+#define DF (env->df)
+
+#define CC_SRC (env->cc_src)
+#define CC_DST (env->cc_dst)
+#define CC_OP (env->cc_op)
+
+/* float macros */
+#define FT0 (env->ft0)
+#define ST0 (env->fpregs[env->fpstt])
+#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7])
+#define ST1 ST(1)
+
+#ifdef USE_FP_CONVERT
+#define FP_CONVERT (env->fp_convert)
+#endif
+
+#include "cpu.h"
+#include "exec-all.h"
+
+typedef struct CCTable {
+ int (*compute_all)(void); /* return all the flags */
+ int (*compute_c)(void); /* return the C flag */
+} CCTable;
+
+extern CCTable cc_table[];
+
+void load_seg(int seg_reg, int selector, unsigned cur_eip);
+void helper_ljmp_protected_T0_T1(void);
+void helper_lcall_real_T0_T1(int shift, int next_eip);
+void helper_lcall_protected_T0_T1(int shift, int next_eip);
+void helper_iret_real(int shift);
+void helper_iret_protected(int shift);
+void helper_lret_protected(int shift, int addend);
+void helper_lldt_T0(void);
+void helper_ltr_T0(void);
+void helper_movl_crN_T0(int reg);
+void helper_movl_drN_T0(int reg);
+void helper_invlpg(unsigned int addr);
+void cpu_x86_update_cr0(CPUX86State *env);
+void cpu_x86_update_cr3(CPUX86State *env);
+void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write);
+void tlb_fill(unsigned long addr, int is_write, void *retaddr);
+void __hidden cpu_lock(void);
+void __hidden cpu_unlock(void);
+void do_interrupt(int intno, int is_int, int error_code,
+ unsigned int next_eip, int is_hw);
+void do_interrupt_user(int intno, int is_int, int error_code,
+ unsigned int next_eip);
+void raise_interrupt(int intno, int is_int, int error_code,
+ unsigned int next_eip);
+void raise_exception_err(int exception_index, int error_code);
+void raise_exception(int exception_index);
+void __hidden cpu_loop_exit(void);
+void helper_fsave(uint8_t *ptr, int data32);
+void helper_frstor(uint8_t *ptr, int data32);
+
+void OPPROTO op_movl_eflags_T0(void);
+void OPPROTO op_movl_T0_eflags(void);
+void helper_divl_EAX_T0(uint32_t eip);
+void helper_idivl_EAX_T0(uint32_t eip);
+void helper_cmpxchg8b(void);
+void helper_cpuid(void);
+void helper_rdtsc(void);
+void helper_rdmsr(void);
+void helper_wrmsr(void);
+void helper_lsl(void);
+void helper_lar(void);
+
+#ifdef USE_X86LDOUBLE
+/* use long double functions */
+#define lrint lrintl
+#define llrint llrintl
+#define fabs fabsl
+#define sin sinl
+#define cos cosl
+#define sqrt sqrtl
+#define pow powl
+#define log logl
+#define tan tanl
+#define atan2 atan2l
+#define floor floorl
+#define ceil ceill
+#define rint rintl
+#endif
+
+extern int lrint(CPU86_LDouble x);
+extern int64_t llrint(CPU86_LDouble x);
+extern CPU86_LDouble fabs(CPU86_LDouble x);
+extern CPU86_LDouble sin(CPU86_LDouble x);
+extern CPU86_LDouble cos(CPU86_LDouble x);
+extern CPU86_LDouble sqrt(CPU86_LDouble x);
+extern CPU86_LDouble pow(CPU86_LDouble, CPU86_LDouble);
+extern CPU86_LDouble log(CPU86_LDouble x);
+extern CPU86_LDouble tan(CPU86_LDouble x);
+extern CPU86_LDouble atan2(CPU86_LDouble, CPU86_LDouble);
+extern CPU86_LDouble floor(CPU86_LDouble x);
+extern CPU86_LDouble ceil(CPU86_LDouble x);
+extern CPU86_LDouble rint(CPU86_LDouble x);
+
+#define RC_MASK 0xc00
+#define RC_NEAR 0x000
+#define RC_DOWN 0x400
+#define RC_UP 0x800
+#define RC_CHOP 0xc00
+
+#define MAXTAN 9223372036854775808.0
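+/* MAXTAN is 2^63: fptan sets C2 and leaves the operand unchanged beyond it */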
+
+#ifdef __arm__
+/* we have no way to do correct rounding - an FPU emulator is needed */
+#define FE_DOWNWARD FE_TONEAREST
+#define FE_UPWARD FE_TONEAREST
+#define FE_TOWARDZERO FE_TONEAREST
+#endif
+
+#ifdef USE_X86LDOUBLE
+
+/* only for x86 */
+typedef union {
+ long double d;
+ struct {
+ unsigned long long lower;
+ unsigned short upper;
+ } l;
+} CPU86_LDoubleU;
+
+/* the following deal with x86 long double-precision numbers */
+#define MAXEXPD 0x7fff
+#define EXPBIAS 16383
+#define EXPD(fp) (fp.l.upper & 0x7fff)
+#define SIGND(fp) ((fp.l.upper) & 0x8000)
+#define MANTD(fp) (fp.l.lower)
+#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
+
+#else
+
+/* NOTE: arm is horrible: the two 32 bit words of a double are stored in big endian order ! */
+typedef union {
+ double d;
+#if !defined(WORDS_BIGENDIAN) && !defined(__arm__)
+ struct {
+ uint32_t lower;
+ int32_t upper;
+ } l;
+#else
+ struct {
+ int32_t upper;
+ uint32_t lower;
+ } l;
+#endif
+#ifndef __arm__
+ int64_t ll;
+#endif
+} CPU86_LDoubleU;
+
+/* the following deal with IEEE double-precision numbers */
+#define MAXEXPD 0x7ff
+#define EXPBIAS 1023
+#define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF)
+#define SIGND(fp) ((fp.l.upper) & 0x80000000)
+#ifdef __arm__
+#define MANTD(fp) (fp.l.lower | ((uint64_t)(fp.l.upper & ((1 << 20) - 1)) << 32))
+#else
+#define MANTD(fp) (fp.ll & ((1LL << 52) - 1))
+#endif
+#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7ff << 20)) | (EXPBIAS << 20)
+#endif
+
+static inline void fpush(void)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fptags[env->fpstt] = 0; /* validate stack entry */
+}
+
+static inline void fpop(void)
+{
+ env->fptags[env->fpstt] = 1; /* invalidate stack entry */
+ env->fpstt = (env->fpstt + 1) & 7;
+}
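+
+/* the 8 entry register stack is circular: fpstt is the index of the
+ top of stack and ST(n) resolves to fpregs[(fpstt + n) & 7] */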
+
+#ifndef USE_X86LDOUBLE
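+/* without host long double support, the 80 bit extended values found
+ in memory are converted to/from IEEE doubles: the 15 bit exponent is
+ rebiased to 11 bits and only the top 52 mantissa bits are kept */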
+static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
+{
+ CPU86_LDoubleU temp;
+ int upper, e;
+ uint64_t ll;
+
+ /* mantissa */
+ upper = lduw(ptr + 8);
+ /* XXX: handle overflow ? */
+ e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
+ e |= (upper >> 4) & 0x800; /* sign */
+ ll = (ldq(ptr) >> 11) & ((1LL << 52) - 1);
+#ifdef __arm__
+ temp.l.upper = (e << 20) | (ll >> 32);
+ temp.l.lower = ll;
+#else
+ temp.ll = ll | ((uint64_t)e << 52);
+#endif
+ return temp.d;
+}
+
+static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
+{
+ CPU86_LDoubleU temp;
+ int e;
+
+ temp.d = f;
+ /* mantissa */
+ stq(ptr, (MANTD(temp) << 11) | (1LL << 63));
+ /* exponent + sign */
+ e = EXPD(temp) - EXPBIAS + 16383;
+ e |= SIGND(temp) >> 16;
+ stw(ptr + 8, e);
+}
+#endif
+
+const CPU86_LDouble f15rk[7];
+
+void helper_fldt_ST0_A0(void);
+void helper_fstt_ST0_A0(void);
+void helper_fbld_ST0_A0(void);
+void helper_fbst_ST0_A0(void);
+void helper_f2xm1(void);
+void helper_fyl2x(void);
+void helper_fptan(void);
+void helper_fpatan(void);
+void helper_fxtract(void);
+void helper_fprem1(void);
+void helper_fprem(void);
+void helper_fyl2xp1(void);
+void helper_fsqrt(void);
+void helper_fsincos(void);
+void helper_frndint(void);
+void helper_fscale(void);
+void helper_fsin(void);
+void helper_fcos(void);
+void helper_fxam_ST0(void);
+void helper_fstenv(uint8_t *ptr, int data32);
+void helper_fldenv(uint8_t *ptr, int data32);
+void helper_fsave(uint8_t *ptr, int data32);
+void helper_frstor(uint8_t *ptr, int data32);
+
+const uint8_t parity_table[256];
+const uint8_t rclw_table[32];
+const uint8_t rclb_table[32];
+
+static inline uint32_t compute_eflags(void)
+{
+ return env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
+}
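+
+/* i.e. the full eflags image is rebuilt from three places: the static
+ bits kept in env->eflags, the arithmetic bits computed lazily through
+ cc_table, and DF which is stored separately as +1/-1 */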
+
+#define FL_UPDATE_MASK32 (TF_MASK | AC_MASK | ID_MASK)
+
+#define FL_UPDATE_CPL0_MASK (TF_MASK | IF_MASK | IOPL_MASK | NT_MASK | \
+ RF_MASK | AC_MASK | ID_MASK)
+
+/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
+static inline void load_eflags(int eflags, int update_mask)
+{
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ DF = 1 - (2 * ((eflags >> 10) & 1));
+ env->eflags = (env->eflags & ~update_mask) |
+ (eflags & update_mask);
+}
+
+/* memory access macros */
+
+#define ldul ldl
+#define lduq ldq
+#define ldul_user ldl_user
+#define ldul_kernel ldl_kernel
+
+#define ldub_raw ldub
+#define ldsb_raw ldsb
+#define lduw_raw lduw
+#define ldsw_raw ldsw
+#define ldl_raw ldl
+#define ldq_raw ldq
+
+#define stb_raw stb
+#define stw_raw stw
+#define stl_raw stl
+#define stq_raw stq
+
+#define MEMUSER 0
+#define DATA_SIZE 1
+#include "softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+
+#undef MEMUSER
+#define MEMUSER 1
+#define DATA_SIZE 1
+#include "softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+
+#undef MEMUSER
+
--- /dev/null
+/*
+ * i386 helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "exec.h"
+
+const uint8_t parity_table[256] = {
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+};
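+
+/* For reference, the table above can be regenerated with a small
+ sketch like this one (PF is set when the low byte of the result
+ contains an even number of one bits): */
+#if 0
+#include <stdio.h>
+static void gen_parity_table(void)
+{
+ int i, j, n;
+ for(i = 0; i < 256; i++) {
+ n = 0;
+ for(j = 0; j < 8; j++)
+ n += (i >> j) & 1;
+ printf("%s,%c", (n & 1) == 0 ? " CC_P" : " 0",
+ (i % 8) == 7 ? '\n' : ' ');
+ }
+}
+#endif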
+
+/* modulo 17 table */
+const uint8_t rclw_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9,10,11,12,13,14,15,
+ 16, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9,10,11,12,13,14,
+};
+
+/* modulo 9 table */
+const uint8_t rclb_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 0, 1, 2, 3, 4,
+};
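+
+/* RCL rotates through CF, so the rotated quantity is 17 bits wide for
+ word operands and 9 bits for byte operands; the tables reduce the
+ count accordingly: e.g. rclb_table[11] == 2, so an 8 bit RCL by 11
+ behaves like an RCL by 2 */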
+
+const CPU86_LDouble f15rk[7] =
+{
+ 0.00000000000000000000L,
+ 1.00000000000000000000L,
+ 3.14159265358979323851L, /*pi*/
+ 0.30102999566398119523L, /*lg2*/
+ 0.69314718055994530943L, /*ln2*/
+ 1.44269504088896340739L, /*l2e*/
+ 3.32192809488736234781L, /*l2t*/
+};
+
+/* thread support */
+
+spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+
+void cpu_lock(void)
+{
+ spin_lock(&global_cpu_lock);
+}
+
+void cpu_unlock(void)
+{
+ spin_unlock(&global_cpu_lock);
+}
+
+void cpu_loop_exit(void)
+{
+ /* NOTE: the registers at this point must be saved by hand because
+ longjmp restores them */
+#ifdef reg_EAX
+ env->regs[R_EAX] = EAX;
+#endif
+#ifdef reg_ECX
+ env->regs[R_ECX] = ECX;
+#endif
+#ifdef reg_EDX
+ env->regs[R_EDX] = EDX;
+#endif
+#ifdef reg_EBX
+ env->regs[R_EBX] = EBX;
+#endif
+#ifdef reg_ESP
+ env->regs[R_ESP] = ESP;
+#endif
+#ifdef reg_EBP
+ env->regs[R_EBP] = EBP;
+#endif
+#ifdef reg_ESI
+ env->regs[R_ESI] = ESI;
+#endif
+#ifdef reg_EDI
+ env->regs[R_EDI] = EDI;
+#endif
+ longjmp(env->jmp_env, 1);
+}
+
+static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
+ uint32_t *esp_ptr, int dpl)
+{
+ int type, index, shift;
+
+#if 0
+ {
+ int i;
+ printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
+ for(i=0;i<env->tr.limit;i++) {
+ printf("%02x ", env->tr.base[i]);
+ if ((i & 7) == 7) printf("\n");
+ }
+ printf("\n");
+ }
+#endif
+
+ if (!(env->tr.flags & DESC_P_MASK))
+ cpu_abort(env, "invalid tss");
+ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ if ((type & 7) != 1)
+ cpu_abort(env, "invalid tss type");
+ shift = type >> 3;
+ index = (dpl * 4 + 2) << shift;
+ if (index + (4 << shift) - 1 > env->tr.limit)
+ raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
+ if (shift == 0) {
+ *esp_ptr = lduw(env->tr.base + index);
+ *ss_ptr = lduw(env->tr.base + index + 2);
+ } else {
+ *esp_ptr = ldl(env->tr.base + index);
+ *ss_ptr = lduw(env->tr.base + index + 4);
+ }
+}
+
+/* return non-zero on error */
+static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
+ int selector)
+{
+ SegmentCache *dt;
+ int index;
+ uint8_t *ptr;
+
+ if (selector & 0x4)
+ dt = &env->ldt;
+ else
+ dt = &env->gdt;
+ index = selector & ~7;
+ if ((index + 7) > dt->limit)
+ return -1;
+ ptr = dt->base + index;
+ *e1_ptr = ldl(ptr);
+ *e2_ptr = ldl(ptr + 4);
+ return 0;
+}
+
+static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
+{
+ unsigned int limit;
+ limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+ if (e2 & DESC_G_MASK)
+ limit = (limit << 12) | 0xfff;
+ return limit;
+}
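+
+/* e.g. with DESC_G_MASK set, a raw 20 bit limit of 0xfffff expands to
+ 0xffffffff (4 GiB - 1); with G clear the limit is a plain byte count */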
+
+static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
+{
+ return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
+}
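+
+/* the 32 bit base is scattered across the descriptor: bits 16-31 of
+ e1 plus bits 0-7 and 24-31 of e2 */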
+
+static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
+{
+ sc->base = get_seg_base(e1, e2);
+ sc->limit = get_seg_limit(e1, e2);
+ sc->flags = e2;
+}
+
+/* init the segment cache in vm86 mode. */
+static inline void load_seg_vm(int seg, int selector)
+{
+ selector &= 0xffff;
+ cpu_x86_load_seg_cache(env, seg, selector,
+ (uint8_t *)(selector << 4), 0xffff, 0);
+}
+
+/* protected mode interrupt */
+static void do_interrupt_protected(int intno, int is_int, int error_code,
+ unsigned int next_eip, int is_hw)
+{
+ SegmentCache *dt;
+ uint8_t *ptr, *ssp;
+ int type, dpl, selector, ss_dpl, cpl;
+ int has_error_code, new_stack, shift;
+ uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
+ uint32_t old_cs, old_ss, old_esp, old_eip;
+
+ dt = &env->idt;
+ if (intno * 8 + 7 > dt->limit)
+ raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+ ptr = dt->base + intno * 8;
+ e1 = ldl(ptr);
+ e2 = ldl(ptr + 4);
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch(type) {
+ case 5: /* task gate */
+ cpu_abort(env, "task gate not supported");
+ break;
+ case 6: /* 286 interrupt gate */
+ case 7: /* 286 trap gate */
+ case 14: /* 386 interrupt gate */
+ case 15: /* 386 trap gate */
+ break;
+ default:
+ raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+ break;
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl)
+ raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
+ selector = e1 >> 16;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ if ((selector & 0xfffc) == 0)
+ raise_exception_err(EXCP0D_GPF, 0);
+
+ if (load_segment(&e1, &e2, selector) != 0)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ /* to inner privilege level */
+ get_ss_esp_from_tss(&ss, &esp, dpl);
+ if ((ss & 0xfffc) == 0)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if ((ss & 3) != dpl)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if (load_segment(&ss_e1, &ss_e2, ss) != 0)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK))
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if (!(ss_e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ new_stack = 1;
+ } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+ /* to same privilege level */
+ new_stack = 0;
+ } else {
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ new_stack = 0; /* avoid warning */
+ }
+
+ shift = type >> 3;
+ has_error_code = 0;
+ if (!is_int && !is_hw) {
+ switch(intno) {
+ case 8:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 17:
+ has_error_code = 1;
+ break;
+ }
+ }
+ push_size = 6 + (new_stack << 2) + (has_error_code << 1);
+ if (env->eflags & VM_MASK)
+ push_size += 8;
+ push_size <<= shift;
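+ /* i.e. eip + cs + eflags (3 slots), plus ss + esp when switching
+ stacks, plus the error code, plus the 4 vm86 segment selectors,
+ each slot being 2 or 4 bytes depending on the gate size */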
+
+ /* XXX: check that enough room is available */
+ if (new_stack) {
+ old_esp = ESP;
+ old_ss = env->segs[R_SS].selector;
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ get_seg_base(ss_e1, ss_e2),
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ } else {
+ old_esp = 0;
+ old_ss = 0;
+ esp = ESP;
+ }
+ if (is_int)
+ old_eip = next_eip;
+ else
+ old_eip = env->eip;
+ old_cs = env->segs[R_CS].selector;
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ cpu_x86_set_cpl(env, dpl);
+ env->eip = offset;
+ ESP = esp - push_size;
+ ssp = env->segs[R_SS].base + esp;
+ if (shift == 1) {
+ int old_eflags;
+ if (env->eflags & VM_MASK) {
+ ssp -= 4;
+ stl(ssp, env->segs[R_GS].selector);
+ ssp -= 4;
+ stl(ssp, env->segs[R_FS].selector);
+ ssp -= 4;
+ stl(ssp, env->segs[R_DS].selector);
+ ssp -= 4;
+ stl(ssp, env->segs[R_ES].selector);
+ }
+ if (new_stack) {
+ ssp -= 4;
+ stl(ssp, old_ss);
+ ssp -= 4;
+ stl(ssp, old_esp);
+ }
+ ssp -= 4;
+ old_eflags = compute_eflags();
+ stl(ssp, old_eflags);
+ ssp -= 4;
+ stl(ssp, old_cs);
+ ssp -= 4;
+ stl(ssp, old_eip);
+ if (has_error_code) {
+ ssp -= 4;
+ stl(ssp, error_code);
+ }
+ } else {
+ if (new_stack) {
+ ssp -= 2;
+ stw(ssp, old_ss);
+ ssp -= 2;
+ stw(ssp, old_esp);
+ }
+ ssp -= 2;
+ stw(ssp, compute_eflags());
+ ssp -= 2;
+ stw(ssp, old_cs);
+ ssp -= 2;
+ stw(ssp, old_eip);
+ if (has_error_code) {
+ ssp -= 2;
+ stw(ssp, error_code);
+ }
+ }
+
+ /* interrupt gates clear the IF flag (trap gates do not) */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+}
+
+/* real mode interrupt */
+static void do_interrupt_real(int intno, int is_int, int error_code,
+ unsigned int next_eip)
+{
+ SegmentCache *dt;
+ uint8_t *ptr, *ssp;
+ int selector;
+ uint32_t offset, esp;
+ uint32_t old_cs, old_eip;
+
+ /* real mode (simpler !) */
+ dt = &env->idt;
+ if (intno * 4 + 3 > dt->limit)
+ raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+ ptr = dt->base + intno * 4;
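+ /* each real mode vector is 4 bytes: a 16 bit offset followed by a
+ 16 bit segment */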
+ offset = lduw(ptr);
+ selector = lduw(ptr + 2);
+ esp = ESP;
+ ssp = env->segs[R_SS].base;
+ if (is_int)
+ old_eip = next_eip;
+ else
+ old_eip = env->eip;
+ old_cs = env->segs[R_CS].selector;
+ esp -= 2;
+ stw(ssp + (esp & 0xffff), compute_eflags());
+ esp -= 2;
+ stw(ssp + (esp & 0xffff), old_cs);
+ esp -= 2;
+ stw(ssp + (esp & 0xffff), old_eip);
+
+ /* update processor state */
+ ESP = (ESP & ~0xffff) | (esp & 0xffff);
+ env->eip = offset;
+ env->segs[R_CS].selector = selector;
+ env->segs[R_CS].base = (uint8_t *)(selector << 4);
+ env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
+}
+
+/* fake user mode interrupt */
+void do_interrupt_user(int intno, int is_int, int error_code,
+ unsigned int next_eip)
+{
+ SegmentCache *dt;
+ uint8_t *ptr;
+ int dpl, cpl;
+ uint32_t e2;
+
+ dt = &env->idt;
+ ptr = dt->base + (intno * 8);
+ e2 = ldl(ptr + 4);
+
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl)
+ raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
+
+ /* Since we emulate only user space, we cannot do more than
+ exiting the emulation with the suitable exception and error
+ code */
+ if (is_int)
+ EIP = next_eip;
+}
+
+/*
+ * Begin execution of an interrupt. is_int is TRUE if coming from
+ * the int instruction. next_eip is the EIP value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE.
+ */
+void do_interrupt(int intno, int is_int, int error_code,
+ unsigned int next_eip, int is_hw)
+{
+ if (env->cr[0] & CR0_PE_MASK) {
+ do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+ } else {
+ do_interrupt_real(intno, is_int, error_code, next_eip);
+ }
+}
+
+/*
+ * Signal an interrupt. It is executed in the main CPU loop.
+ * is_int is TRUE if coming from the int instruction. next_eip is the
+ * EIP value AFTER the interrupt instruction. It is only relevant if
+ * is_int is TRUE.
+ */
+void raise_interrupt(int intno, int is_int, int error_code,
+ unsigned int next_eip)
+{
+ env->exception_index = intno;
+ env->error_code = error_code;
+ env->exception_is_int = is_int;
+ env->exception_next_eip = next_eip;
+ cpu_loop_exit();
+}
+
+/* shortcuts to generate exceptions */
+void raise_exception_err(int exception_index, int error_code)
+{
+ raise_interrupt(exception_index, 0, error_code, 0);
+}
+
+void raise_exception(int exception_index)
+{
+ raise_interrupt(exception_index, 0, 0, 0);
+}
+
+#ifdef BUGGY_GCC_DIV64
+/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
+ call it from another function */
+uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
+{
+ *q_ptr = num / den;
+ return num % den;
+}
+
+int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
+{
+ *q_ptr = num / den;
+ return num % den;
+}
+#endif
+
+void helper_divl_EAX_T0(uint32_t eip)
+{
+ unsigned int den, q, r;
+ uint64_t num;
+
+ num = EAX | ((uint64_t)EDX << 32);
+ den = T0;
+ if (den == 0) {
+ EIP = eip;
+ raise_exception(EXCP00_DIVZ);
+ }
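+ /* note: unlike real hardware, a quotient that does not fit in 32
+ bits is not detected here and is silently truncated */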
+#ifdef BUGGY_GCC_DIV64
+ r = div64(&q, num, den);
+#else
+ q = (num / den);
+ r = (num % den);
+#endif
+ EAX = q;
+ EDX = r;
+}
+
+void helper_idivl_EAX_T0(uint32_t eip)
+{
+ int den, q, r;
+ int64_t num;
+
+ num = EAX | ((uint64_t)EDX << 32);
+ den = T0;
+ if (den == 0) {
+ EIP = eip;
+ raise_exception(EXCP00_DIVZ);
+ }
+#ifdef BUGGY_GCC_DIV64
+ r = idiv64(&q, num, den);
+#else
+ q = (num / den);
+ r = (num % den);
+#endif
+ EAX = q;
+ EDX = r;
+}
+
+void helper_cmpxchg8b(void)
+{
+ uint64_t d;
+ int eflags;
+
+ eflags = cc_table[CC_OP].compute_all();
+ d = ldq((uint8_t *)A0);
+ if (d == (((uint64_t)EDX << 32) | EAX)) {
+ stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
+ eflags |= CC_Z;
+ } else {
+ EDX = d >> 32;
+ EAX = d;
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+}
+
+/* We simulate a pre-MMX pentium as in valgrind */
+#define CPUID_FP87 (1 << 0)
+#define CPUID_VME (1 << 1)
+#define CPUID_DE (1 << 2)
+#define CPUID_PSE (1 << 3)
+#define CPUID_TSC (1 << 4)
+#define CPUID_MSR (1 << 5)
+#define CPUID_PAE (1 << 6)
+#define CPUID_MCE (1 << 7)
+#define CPUID_CX8 (1 << 8)
+#define CPUID_APIC (1 << 9)
+#define CPUID_SEP (1 << 11) /* sysenter/sysexit */
+#define CPUID_MTRR (1 << 12)
+#define CPUID_PGE (1 << 13)
+#define CPUID_MCA (1 << 14)
+#define CPUID_CMOV (1 << 15)
+/* ... */
+#define CPUID_MMX (1 << 23)
+#define CPUID_FXSR (1 << 24)
+#define CPUID_SSE (1 << 25)
+#define CPUID_SSE2 (1 << 26)
+
+void helper_cpuid(void)
+{
+ if (EAX == 0) {
+ EAX = 1; /* max EAX index supported */
+ EBX = 0x756e6547;
+ ECX = 0x6c65746e;
+ EDX = 0x49656e69;
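+ /* the bytes of EBX, EDX and ECX spell "GenuineIntel" in little
+ endian order */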
+ } else if (EAX == 1) {
+ int family, model, stepping;
+ /* EAX = 1 info */
+#if 0
+ /* pentium 75-200 */
+ family = 5;
+ model = 2;
+ stepping = 11;
+#else
+ /* pentium pro */
+ family = 6;
+ model = 1;
+ stepping = 3;
+#endif
+ EAX = (family << 8) | (model << 4) | stepping;
+ EBX = 0;
+ ECX = 0;
+ EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
+ CPUID_TSC | CPUID_MSR | CPUID_MCE |
+ CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
+ }
+}
+
+void helper_lldt_T0(void)
+{
+ int selector;
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index;
+ uint8_t *ptr;
+
+ selector = T0 & 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* XXX: NULL selector case: invalid LDT */
+ env->ldt.base = NULL;
+ env->ldt.limit = 0;
+ } else {
+ if (selector & 0x4)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ dt = &env->gdt;
+ index = selector & ~7;
+ if ((index + 7) > dt->limit)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ ptr = dt->base + index;
+ e1 = ldl(ptr);
+ e2 = ldl(ptr + 4);
+ if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ }
+ env->ldt.selector = selector;
+}
+
+void helper_ltr_T0(void)
+{
+ int selector;
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index, type;
+ uint8_t *ptr;
+
+ selector = T0 & 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* NULL selector case: invalid TR */
+ env->tr.base = NULL;
+ env->tr.limit = 0;
+ env->tr.flags = 0;
+ } else {
+ if (selector & 0x4)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ dt = &env->gdt;
+ index = selector & ~7;
+ if ((index + 7) > dt->limit)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ ptr = dt->base + index;
+ e1 = ldl(ptr);
+ e2 = ldl(ptr + 4);
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ if ((e2 & DESC_S_MASK) ||
+ (type != 2 && type != 9))
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+ load_seg_cache_raw_dt(&env->tr, e1, e2);
+ e2 |= 0x00000200; /* set the busy bit */
+ stl(ptr + 4, e2);
+ }
+ env->tr.selector = selector;
+}
+
+/* only works in protected mode, not in VM86 mode. Calling load_seg with
+ seg_reg == R_CS is discouraged */
+void load_seg(int seg_reg, int selector, unsigned int cur_eip)
+{
+ uint32_t e1, e2;
+
+ if ((selector & 0xfffc) == 0) {
+ /* null selector case */
+ if (seg_reg == R_SS) {
+ EIP = cur_eip;
+ raise_exception_err(EXCP0D_GPF, 0);
+ } else {
+ cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
+ }
+ } else {
+ if (load_segment(&e1, &e2, selector) != 0) {
+ EIP = cur_eip;
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) ||
+ (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+ EIP = cur_eip;
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ }
+
+ if (seg_reg == R_SS) {
+ if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
+ EIP = cur_eip;
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ }
+ } else {
+ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+ EIP = cur_eip;
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ }
+ }
+
+ if (!(e2 & DESC_P_MASK)) {
+ EIP = cur_eip;
+ if (seg_reg == R_SS)
+ raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
+ else
+ raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+#if 0
+ fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
+ selector, (unsigned long)sc->base, sc->limit, sc->flags);
+#endif
+ }
+}
+
+/* protected mode jump */
+void helper_ljmp_protected_T0_T1(void)
+{
+ int new_cs, new_eip;
+ uint32_t e1, e2, cpl, dpl, rpl, limit;
+
+ new_cs = T0;
+ new_eip = T1;
+ if ((new_cs & 0xfffc) == 0)
+ raise_exception_err(EXCP0D_GPF, 0);
+ if (load_segment(&e1, &e2, new_cs) != 0)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK))
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_CS_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ if (dpl != cpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ EIP = new_eip;
+ } else {
+ cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
+ new_cs, new_eip);
+ }
+}
+
+/* real mode call */
+void helper_lcall_real_T0_T1(int shift, int next_eip)
+{
+ int new_cs, new_eip;
+ uint32_t esp, esp_mask;
+ uint8_t *ssp;
+
+ new_cs = T0;
+ new_eip = T1;
+ esp = ESP;
+ esp_mask = 0xffffffff;
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ esp_mask = 0xffff;
+ ssp = env->segs[R_SS].base;
+ if (shift) {
+ esp -= 4;
+ stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+ esp -= 4;
+ stl(ssp + (esp & esp_mask), next_eip);
+ } else {
+ esp -= 2;
+ stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+ esp -= 2;
+ stw(ssp + (esp & esp_mask), next_eip);
+ }
+
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ ESP = (ESP & ~0xffff) | (esp & 0xffff);
+ else
+ ESP = esp;
+ env->eip = new_eip;
+ env->segs[R_CS].selector = new_cs;
+ env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
+}
+
+/* protected mode call */
+void helper_lcall_protected_T0_T1(int shift, int next_eip)
+{
+ int new_cs, new_eip;
+ uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
+ uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
+ uint32_t old_ss, old_esp, val, i, limit;
+ uint8_t *ssp, *old_ssp;
+
+ new_cs = T0;
+ new_eip = T1;
+ if ((new_cs & 0xfffc) == 0)
+ raise_exception_err(EXCP0D_GPF, 0);
+ if (load_segment(&e1, &e2, new_cs) != 0)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK))
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_CS_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ if (dpl != cpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+
+ sp = ESP;
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ sp &= 0xffff;
+ ssp = env->segs[R_SS].base + sp;
+ if (shift) {
+ ssp -= 4;
+ stl(ssp, env->segs[R_CS].selector);
+ ssp -= 4;
+ stl(ssp, next_eip);
+ } else {
+ ssp -= 2;
+ stw(ssp, env->segs[R_CS].selector);
+ ssp -= 2;
+ stw(ssp, next_eip);
+ }
+ sp -= (4 << shift);
+
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ /* from this point, not restartable */
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ ESP = (ESP & 0xffff0000) | (sp & 0xffff);
+ else
+ ESP = sp;
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ EIP = new_eip;
+ } else {
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch(type) {
+ case 1: /* available 286 TSS */
+ case 9: /* available 386 TSS */
+ case 5: /* task gate */
+ cpu_abort(env, "task gate not supported");
+ break;
+ case 4: /* 286 call gate */
+ case 12: /* 386 call gate */
+ break;
+ default:
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ break;
+ }
+ shift = type >> 3;
+
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ rpl = new_cs & 3;
+ if (dpl < cpl || dpl < rpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+ selector = e1 >> 16;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ if ((selector & 0xfffc) == 0)
+ raise_exception_err(EXCP0D_GPF, 0);
+
+ if (load_segment(&e1, &e2, selector) != 0)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl)
+ raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
+
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ /* to inner privilege level */
+ get_ss_esp_from_tss(&ss, &sp, dpl);
+ if ((ss & 0xfffc) == 0)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if ((ss & 3) != dpl)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if (load_segment(&ss_e1, &ss_e2, ss) != 0)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl)
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK))
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+ if (!(ss_e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
+
+ param_count = e2 & 0x1f;
+ push_size = ((param_count * 2) + 8) << shift;
+
+ old_esp = ESP;
+ old_ss = env->segs[R_SS].selector;
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ old_esp &= 0xffff;
+ old_ssp = env->segs[R_SS].base + old_esp;
+
+ /* XXX: from this point not restartable */
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ get_seg_base(ss_e1, ss_e2),
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ sp &= 0xffff;
+ ssp = env->segs[R_SS].base + sp;
+ if (shift) {
+ ssp -= 4;
+ stl(ssp, old_ss);
+ ssp -= 4;
+ stl(ssp, old_esp);
+ ssp -= 4 * param_count;
+ for(i = 0; i < param_count; i++) {
+ val = ldl(old_ssp + i * 4);
+ stl(ssp + i * 4, val);
+ }
+ } else {
+ ssp -= 2;
+ stw(ssp, old_ss);
+ ssp -= 2;
+ stw(ssp, old_esp);
+ ssp -= 2 * param_count;
+ for(i = 0; i < param_count; i++) {
+ val = lduw(old_ssp + i * 2);
+ stw(ssp + i * 2, val);
+ }
+ }
+ } else {
+ /* to same privilege level */
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ sp &= 0xffff;
+ ssp = env->segs[R_SS].base + sp;
+ push_size = (4 << shift);
+ }
+
+ if (shift) {
+ ssp -= 4;
+ stl(ssp, env->segs[R_CS].selector);
+ ssp -= 4;
+ stl(ssp, next_eip);
+ } else {
+ ssp -= 2;
+ stw(ssp, env->segs[R_CS].selector);
+ ssp -= 2;
+ stw(ssp, next_eip);
+ }
+
+ sp -= push_size;
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ cpu_x86_set_cpl(env, dpl);
+
+ /* from this point, not restartable if same privilege */
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ ESP = (ESP & 0xffff0000) | (sp & 0xffff);
+ else
+ ESP = sp;
+ EIP = offset;
+ }
+}
+
+/* real mode iret */
+void helper_iret_real(int shift)
+{
+ uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
+ uint8_t *ssp;
+ int eflags_mask;
+
+ sp = ESP & 0xffff;
+ ssp = env->segs[R_SS].base + sp;
+ if (shift == 1) {
+ /* 32 bits */
+ new_eflags = ldl(ssp + 8);
+ new_cs = ldl(ssp + 4) & 0xffff;
+ new_eip = ldl(ssp) & 0xffff;
+ } else {
+ /* 16 bits */
+ new_eflags = lduw(ssp + 4);
+ new_cs = lduw(ssp + 2);
+ new_eip = lduw(ssp);
+ }
+ new_esp = sp + (6 << shift);
+ ESP = (ESP & 0xffff0000) |
+ (new_esp & 0xffff);
+ load_seg_vm(R_CS, new_cs);
+ env->eip = new_eip;
+ eflags_mask = FL_UPDATE_CPL0_MASK;
+ if (shift == 0)
+ eflags_mask &= 0xffff;
+ load_eflags(new_eflags, eflags_mask);
+}
+
+/* protected mode iret */
+static inline void helper_ret_protected(int shift, int is_iret, int addend)
+{
+ uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
+ uint32_t new_es, new_ds, new_fs, new_gs;
+ uint32_t e1, e2, ss_e1, ss_e2;
+ int cpl, dpl, rpl, eflags_mask;
+ uint8_t *ssp;
+
+ sp = ESP;
+ if (!(env->segs[R_SS].flags & DESC_B_MASK))
+ sp &= 0xffff;
+ ssp = env->segs[R_SS].base + sp;
+ if (shift == 1) {
+ /* 32 bits */
+ if (is_iret)
+ new_eflags = ldl(ssp + 8);
+ new_cs = ldl(ssp + 4) & 0xffff;
+ new_eip = ldl(ssp);
+ if (is_iret && (new_eflags & VM_MASK))
+ goto return_to_vm86;
+ } else {
+ /* 16 bits */
+ if (is_iret)
+ new_eflags = lduw(ssp + 4);
+ new_cs = lduw(ssp + 2);
+ new_eip = lduw(ssp);
+ }
+ if ((new_cs & 0xfffc) == 0)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ if (load_segment(&e1, &e2, new_cs) != 0)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ if (!(e2 & DESC_S_MASK) ||
+ !(e2 & DESC_CS_MASK))
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ cpl = env->hflags & HF_CPL_MASK;
+ rpl = new_cs & 3;
+ if (rpl < cpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_CS_MASK) {
+ if (dpl > rpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ } else {
+ if (dpl != rpl)
+ raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
+
+ if (rpl == cpl) {
+ /* return to same privilege level */
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
+ } else {
+ /* return to different privilege level */
+ ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
+ if (shift == 1) {
+ /* 32 bits */
+ new_esp = ldl(ssp);
+ new_ss = ldl(ssp + 4) & 0xffff;
+ } else {
+ /* 16 bits */
+ new_esp = lduw(ssp);
+ new_ss = lduw(ssp + 2);
+ }
+
+ if ((new_ss & 3) != rpl)
+ raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+ if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
+ raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK))
+ raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+ dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl != rpl)
+ raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
+ if (!(ss_e2 & DESC_P_MASK))
+ raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
+
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ get_seg_base(ss_e1, ss_e2),
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ cpu_x86_set_cpl(env, rpl);
+ }
+ if (env->segs[R_SS].flags & DESC_B_MASK)
+ ESP = new_esp;
+ else
+ ESP = (ESP & 0xffff0000) |
+ (new_esp & 0xffff);
+ env->eip = new_eip;
+ if (is_iret) {
+ /* NOTE: 'cpl' can be different from the current CPL */
+ if (cpl == 0)
+ eflags_mask = FL_UPDATE_CPL0_MASK;
+ else
+ eflags_mask = FL_UPDATE_MASK32;
+ if (shift == 0)
+ eflags_mask &= 0xffff;
+ load_eflags(new_eflags, eflags_mask);
+ }
+ return;
+
+ return_to_vm86:
+ new_esp = ldl(ssp + 12);
+ new_ss = ldl(ssp + 16);
+ new_es = ldl(ssp + 20);
+ new_ds = ldl(ssp + 24);
+ new_fs = ldl(ssp + 28);
+ new_gs = ldl(ssp + 32);
+
+ /* modify processor state */
+ load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
+ load_seg_vm(R_CS, new_cs);
+ cpu_x86_set_cpl(env, 3);
+ load_seg_vm(R_SS, new_ss);
+ load_seg_vm(R_ES, new_es);
+ load_seg_vm(R_DS, new_ds);
+ load_seg_vm(R_FS, new_fs);
+ load_seg_vm(R_GS, new_gs);
+
+ env->eip = new_eip;
+ ESP = new_esp;
+}
+
+void helper_iret_protected(int shift)
+{
+ helper_ret_protected(shift, 1, 0);
+}
+
+void helper_lret_protected(int shift, int addend)
+{
+ helper_ret_protected(shift, 0, addend);
+}
+
+void helper_movl_crN_T0(int reg)
+{
+ env->cr[reg] = T0;
+ switch(reg) {
+ case 0:
+ cpu_x86_update_cr0(env);
+ break;
+ case 3:
+ cpu_x86_update_cr3(env);
+ break;
+ }
+}
+
+/* XXX: do more */
+void helper_movl_drN_T0(int reg)
+{
+ env->dr[reg] = T0;
+}
+
+void helper_invlpg(unsigned int addr)
+{
+ cpu_x86_flush_tlb(env, addr);
+}
+
+/* rdtsc */
+#ifndef __i386__
+uint64_t emu_time;
+#endif
+
+void helper_rdtsc(void)
+{
+ uint64_t val;
+#ifdef __i386__
+ asm("rdtsc" : "=A" (val));
+#else
+ /* better than nothing: the time increases */
+ val = emu_time++;
+#endif
+ EAX = val;
+ EDX = val >> 32;
+}
+
+void helper_wrmsr(void)
+{
+ switch(ECX) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = EAX & 0xffff;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = EAX;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = EAX;
+ break;
+ default:
+ /* XXX: exception ? */
+ break;
+ }
+}
+
+void helper_rdmsr(void)
+{
+ switch(ECX) {
+ case MSR_IA32_SYSENTER_CS:
+ EAX = env->sysenter_cs;
+ EDX = 0;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ EAX = env->sysenter_esp;
+ EDX = 0;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ EAX = env->sysenter_eip;
+ EDX = 0;
+ break;
+ default:
+ /* XXX: exception ? */
+ break;
+ }
+}
+
+void helper_lsl(void)
+{
+ unsigned int selector, limit;
+ uint32_t e1, e2;
+
+ CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
+ selector = T0 & 0xffff;
+ if (load_segment(&e1, &e2, selector) != 0)
+ return;
+ limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+ if (e2 & DESC_G_MASK)
+ limit = (limit << 12) | 0xfff;
+ T1 = limit;
+ CC_SRC |= CC_Z;
+}
+
+void helper_lar(void)
+{
+ unsigned int selector;
+ uint32_t e1, e2;
+
+ CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
+ selector = T0 & 0xffff;
+ if (load_segment(&e1, &e2, selector) != 0)
+ return;
+ T1 = e2 & 0x00f0ff00;
+ CC_SRC |= CC_Z;
+}
+
+/* FPU helpers */
+
+#ifndef USE_X86LDOUBLE
+void helper_fldt_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fstt_ST0_A0(void)
+{
+ helper_fstt(ST0, (uint8_t *)A0);
+}
+#endif
+
+/* BCD ops */
+
+#define MUL10(iv) ( iv + iv + (iv << 3) )
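+/* iv + iv + (iv << 3) == 2*iv + 8*iv == 10*iv, computed without a multiply */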
+
+void helper_fbld_ST0_A0(void)
+{
+ CPU86_LDouble tmp;
+ uint64_t val;
+ unsigned int v;
+ int i;
+
+ val = 0;
+ for(i = 8; i >= 0; i--) {
+ v = ldub((uint8_t *)A0 + i);
+ val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
+ }
+ tmp = val;
+ if (ldub((uint8_t *)A0 + 9) & 0x80)
+ tmp = -tmp;
+ fpush();
+ ST0 = tmp;
+}
+
+void helper_fbst_ST0_A0(void)
+{
+ CPU86_LDouble tmp;
+ int v;
+ uint8_t *mem_ref, *mem_end;
+ int64_t val;
+
+ tmp = rint(ST0);
+ val = (int64_t)tmp;
+ mem_ref = (uint8_t *)A0;
+ mem_end = mem_ref + 9;
+ if (val < 0) {
+ stb(mem_end, 0x80);
+ val = -val;
+ } else {
+ stb(mem_end, 0x00);
+ }
+ while (mem_ref < mem_end) {
+ if (val == 0)
+ break;
+ v = val % 100;
+ val = val / 100;
+ v = ((v / 10) << 4) | (v % 10);
+ stb(mem_ref++, v);
+ }
+ while (mem_ref < mem_end) {
+ stb(mem_ref++, 0);
+ }
+}
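+
+/* As an illustration of the packed BCD format handled by the two
+ helpers above: the 10 byte operand stores 18 decimal digits, two
+ digits per byte (the low nibble being the lower digit), least
+ significant byte at the lowest address, with bit 7 of byte 9
+ holding the sign. The value -1234 is thus encoded as:
+
+ byte 0: 0x34, byte 1: 0x12, bytes 2-8: 0x00, byte 9: 0x80 */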
+
+void helper_f2xm1(void)
+{
+ ST0 = pow(2.0,ST0) - 1.0;
+}
+
+void helper_fyl2x(void)
+{
+ CPU86_LDouble fptemp;
+
+ fptemp = ST0;
+ if (fptemp>0.0){
+ fptemp = log(fptemp)/log(2.0); /* log2(ST) */
+ ST1 *= fptemp;
+ fpop();
+ } else {
+ env->fpus &= (~0x4700);
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fptan(void)
+{
+ CPU86_LDouble fptemp;
+
+ fptemp = ST0;
+ if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = tan(fptemp);
+ fpush();
+ ST0 = 1.0;
+ env->fpus &= (~0x400); /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**52 only */
+ }
+}
+
+void helper_fpatan(void)
+{
+ CPU86_LDouble fptemp, fpsrcop;
+
+ fpsrcop = ST1;
+ fptemp = ST0;
+ ST1 = atan2(fpsrcop,fptemp);
+ fpop();
+}
+
+void helper_fxtract(void)
+{
+ CPU86_LDoubleU temp;
+ unsigned int expdif;
+
+ temp.d = ST0;
+ expdif = EXPD(temp) - EXPBIAS;
+ /*DP exponent bias*/
+ ST0 = expdif;
+ fpush();
+ BIASEXPONENT(temp);
+ ST0 = temp.d;
+}
+
+void helper_fprem1(void)
+{
+ CPU86_LDouble dblq, fpsrcop, fptemp;
+ CPU86_LDoubleU fpsrcop1, fptemp1;
+ int expdif;
+ int q;
+
+ fpsrcop = ST0;
+ fptemp = ST1;
+ fpsrcop1.d = fpsrcop;
+ fptemp1.d = fptemp;
+ expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+ if (expdif < 53) {
+ dblq = fpsrcop / fptemp;
+ dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
+ ST0 = fpsrcop - fptemp*dblq;
+ q = (int)dblq; /* cutting off top bits is assumed here */
+ env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
+ /* (C0,C1,C3) <-- (q2,q1,q0) */
+ env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
+ env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
+ env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
+ } else {
+ env->fpus |= 0x400; /* C2 <-- 1 */
+ fptemp = pow(2.0, expdif-50);
+ fpsrcop = (ST0 / ST1) / fptemp;
+ /* fpsrcop = integer obtained by rounding to the nearest */
+ fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
+ floor(fpsrcop): ceil(fpsrcop);
+ ST0 -= (ST1 * fpsrcop * fptemp);
+ }
+}
+
+void helper_fprem(void)
+{
+ CPU86_LDouble dblq, fpsrcop, fptemp;
+ CPU86_LDoubleU fpsrcop1, fptemp1;
+ int expdif;
+ int q;
+
+ fpsrcop = ST0;
+ fptemp = ST1;
+ fpsrcop1.d = fpsrcop;
+ fptemp1.d = fptemp;
+ expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+ if ( expdif < 53 ) {
+ dblq = fpsrcop / fptemp;
+ dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
+ ST0 = fpsrcop - fptemp*dblq;
+ q = (int)dblq; /* cutting off top bits is assumed here */
+ env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
+ /* (C0,C1,C3) <-- (q2,q1,q0) */
+ env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
+ env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
+ env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
+ } else {
+ env->fpus |= 0x400; /* C2 <-- 1 */
+ fptemp = pow(2.0, expdif-50);
+ fpsrcop = (ST0 / ST1) / fptemp;
+ /* fpsrcop = integer obtained by chopping */
+ fpsrcop = (fpsrcop < 0.0)?
+ -(floor(fabs(fpsrcop))): floor(fpsrcop);
+ ST0 -= (ST1 * fpsrcop * fptemp);
+ }
+}
+
+void helper_fyl2xp1(void)
+{
+ CPU86_LDouble fptemp;
+
+ fptemp = ST0;
+ if ((fptemp+1.0)>0.0) {
+ fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
+ ST1 *= fptemp;
+ fpop();
+ } else {
+ env->fpus &= (~0x4700);
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fsqrt(void)
+{
+ CPU86_LDouble fptemp;
+
+ fptemp = ST0;
+ if (fptemp<0.0) {
+ env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
+ env->fpus |= 0x400;
+ }
+ ST0 = sqrt(fptemp);
+}
+
+void helper_fsincos(void)
+{
+ CPU86_LDouble fptemp;
+
+ fptemp = ST0;
+ if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = sin(fptemp);
+ fpush();
+ ST0 = cos(fptemp);
+ env->fpus &= (~0x400); /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_frndint(void)
+{
+ CPU86_LDouble a;
+
+ a = ST0;
+#ifdef __arm__
+ switch(env->fpuc & RC_MASK) {
+ default:
+ case RC_NEAR:
+ asm("rndd %0, %1" : "=f" (a) : "f"(a));
+ break;
+ case RC_DOWN:
+ asm("rnddm %0, %1" : "=f" (a) : "f"(a));
+ break;
+ case RC_UP:
+ asm("rnddp %0, %1" : "=f" (a) : "f"(a));
+ break;
+ case RC_CHOP:
+ asm("rnddz %0, %1" : "=f" (a) : "f"(a));
+ break;
+ }
+#else
+ a = rint(a);
+#endif
+ ST0 = a;
+}
+
+void helper_fscale(void)
+{
+ CPU86_LDouble fpsrcop, fptemp;
+
+ fpsrcop = 2.0;
+ fptemp = pow(fpsrcop,ST1);
+ ST0 *= fptemp;
+}
+
+void helper_fsin(void)
+{
+ CPU86_LDouble fptemp;
+
+ fptemp = ST0;
+ if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = sin(fptemp);
+ env->fpus &= (~0x400); /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**53 only */
+ }
+}
+
+void helper_fcos(void)
+{
+ CPU86_LDouble fptemp;
+
+ fptemp = ST0;
+ if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = cos(fptemp);
+ env->fpus &= (~0x400); /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_fxam_ST0(void)
+{
+ CPU86_LDoubleU temp;
+ int expdif;
+
+ temp.d = ST0;
+
+ env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
+ if (SIGND(temp))
+ env->fpus |= 0x200; /* C1 <-- 1 */
+
+ expdif = EXPD(temp);
+ if (expdif == MAXEXPD) {
+ if (MANTD(temp) == 0)
+ env->fpus |= 0x500 /*Infinity*/;
+ else
+ env->fpus |= 0x100 /*NaN*/;
+ } else if (expdif == 0) {
+ if (MANTD(temp) == 0)
+ env->fpus |= 0x4000 /*Zero*/;
+ else
+ env->fpus |= 0x4400 /*Denormal*/;
+ } else {
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fstenv(uint8_t *ptr, int data32)
+{
+ int fpus, fptag, exp, i;
+ uint64_t mant;
+ CPU86_LDoubleU tmp;
+
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ fptag = 0;
+ for (i=7; i>=0; i--) {
+ fptag <<= 2;
+ if (env->fptags[i]) {
+ fptag |= 3;
+ } else {
+ tmp.d = env->fpregs[i];
+ exp = EXPD(tmp);
+ mant = MANTD(tmp);
+ if (exp == 0 && mant == 0) {
+ /* zero */
+ fptag |= 1;
+ } else if (exp == 0 || exp == MAXEXPD
+#ifdef USE_X86LDOUBLE
+ || (mant & (1LL << 63)) == 0
+#endif
+ ) {
+ /* NaNs, infinity, denormal */
+ fptag |= 2;
+ }
+ }
+ }
+ if (data32) {
+ /* 32 bit */
+ stl(ptr, env->fpuc);
+ stl(ptr + 4, fpus);
+ stl(ptr + 8, fptag);
+ stl(ptr + 12, 0);
+ stl(ptr + 16, 0);
+ stl(ptr + 20, 0);
+ stl(ptr + 24, 0);
+ } else {
+ /* 16 bit */
+ stw(ptr, env->fpuc);
+ stw(ptr + 2, fpus);
+ stw(ptr + 4, fptag);
+ stw(ptr + 6, 0);
+ stw(ptr + 8, 0);
+ stw(ptr + 10, 0);
+ stw(ptr + 12, 0);
+ }
+}
+
+void helper_fldenv(uint8_t *ptr, int data32)
+{
+ int i, fpus, fptag;
+
+ if (data32) {
+ env->fpuc = lduw(ptr);
+ fpus = lduw(ptr + 4);
+ fptag = lduw(ptr + 8);
+ }
+ else {
+ env->fpuc = lduw(ptr);
+ fpus = lduw(ptr + 2);
+ fptag = lduw(ptr + 4);
+ }
+ env->fpstt = (fpus >> 11) & 7;
+ env->fpus = fpus & ~0x3800;
+ for(i = 0; i < 8; i++) {
+ env->fptags[i] = ((fptag & 3) == 3);
+ fptag >>= 2;
+ }
+}
+
+void helper_fsave(uint8_t *ptr, int data32)
+{
+ CPU86_LDouble tmp;
+ int i;
+
+ helper_fstenv(ptr, data32);
+
+ ptr += (14 << data32);
+ for(i = 0;i < 8; i++) {
+ tmp = ST(i);
+#ifdef USE_X86LDOUBLE
+ *(long double *)ptr = tmp;
+#else
+ helper_fstt(tmp, ptr);
+#endif
+ ptr += 10;
+ }
+
+ /* fninit */
+ env->fpus = 0;
+ env->fpstt = 0;
+ env->fpuc = 0x37f;
+ env->fptags[0] = 1;
+ env->fptags[1] = 1;
+ env->fptags[2] = 1;
+ env->fptags[3] = 1;
+ env->fptags[4] = 1;
+ env->fptags[5] = 1;
+ env->fptags[6] = 1;
+ env->fptags[7] = 1;
+}
+
+void helper_frstor(uint8_t *ptr, int data32)
+{
+ CPU86_LDouble tmp;
+ int i;
+
+ helper_fldenv(ptr, data32);
+ ptr += (14 << data32);
+
+ for(i = 0;i < 8; i++) {
+#ifdef USE_X86LDOUBLE
+ tmp = *(long double *)ptr;
+#else
+ tmp = helper_fldt(ptr);
+#endif
+ ST(i) = tmp;
+ ptr += 10;
+ }
+}
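+
+/* Layout note: the environment written by helper_fstenv() occupies
+ 28 bytes in 32 bit mode and 14 bytes in 16 bit mode (hence the
+ 'ptr += 14 << data32' above), and is immediately followed by the
+ eight stack registers stored as 10 byte extended precision values,
+ ST(0) first. */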
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* try to fill the TLB and return an exception if error */
+void tlb_fill(unsigned long addr, int is_write, void *retaddr)
+{
+ TranslationBlock *tb;
+ int ret;
+ unsigned long pc;
+ ret = cpu_x86_handle_mmu_fault(env, addr, is_write);
+ if (ret) {
+ /* now we have a real cpu fault */
+ pc = (unsigned long)retaddr;
+ tb = tb_find_pc(pc);
+ if (tb) {
+ /* the PC is inside the translated code. It means that we have
+ a virtual CPU fault */
+ cpu_restore_state(tb, env, pc);
+ }
+ raise_exception_err(EXCP0E_PAGE, env->error_code);
+ }
+}
--- /dev/null
+/*
+ * i386 helpers (without register variable usage)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <assert.h>
+#include <sys/mman.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+
+//#define DEBUG_MMU
+
+CPUX86State *cpu_x86_init(void)
+{
+ CPUX86State *env;
+ int i;
+ static int inited;
+
+ cpu_exec_init();
+
+ env = malloc(sizeof(CPUX86State));
+ if (!env)
+ return NULL;
+ memset(env, 0, sizeof(CPUX86State));
+ /* basic FPU init */
+ for(i = 0;i < 8; i++)
+ env->fptags[i] = 1;
+ env->fpuc = 0x37f;
+ /* flags setup: we activate the IRQs by default as in user mode */
+ env->eflags = 0x2 | IF_MASK;
+
+ tlb_flush(env);
+#ifdef CONFIG_SOFTMMU
+ env->hflags |= HF_SOFTMMU_MASK;
+#endif
+ /* init various static tables */
+ if (!inited) {
+ inited = 1;
+ optimize_flags_init();
+ }
+ return env;
+}
+
+void cpu_x86_close(CPUX86State *env)
+{
+ free(env);
+}
+
+/***********************************************************/
+/* x86 debug */
+
+static const char *cc_op_str[] = {
+ "DYNAMIC",
+ "EFLAGS",
+ "MUL",
+ "ADDB",
+ "ADDW",
+ "ADDL",
+ "ADCB",
+ "ADCW",
+ "ADCL",
+ "SUBB",
+ "SUBW",
+ "SUBL",
+ "SBBB",
+ "SBBW",
+ "SBBL",
+ "LOGICB",
+ "LOGICW",
+ "LOGICL",
+ "INCB",
+ "INCW",
+ "INCL",
+ "DECB",
+ "DECW",
+ "DECL",
+ "SHLB",
+ "SHLW",
+ "SHLL",
+ "SARB",
+ "SARW",
+ "SARL",
+};
+
+void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags)
+{
+ int eflags;
+ char cc_op_name[32];
+
+ eflags = env->eflags;
+ fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
+ "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
+ "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c]\n",
+ env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
+ env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
+ env->eip, eflags,
+ eflags & DF_MASK ? 'D' : '-',
+ eflags & CC_O ? 'O' : '-',
+ eflags & CC_S ? 'S' : '-',
+ eflags & CC_Z ? 'Z' : '-',
+ eflags & CC_A ? 'A' : '-',
+ eflags & CC_P ? 'P' : '-',
+ eflags & CC_C ? 'C' : '-');
+ fprintf(f, "CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x\n",
+ env->segs[R_CS].selector,
+ env->segs[R_SS].selector,
+ env->segs[R_DS].selector,
+ env->segs[R_ES].selector,
+ env->segs[R_FS].selector,
+ env->segs[R_GS].selector);
+ if (flags & X86_DUMP_CCOP) {
+ if ((unsigned)env->cc_op < CC_OP_NB)
+ strcpy(cc_op_name, cc_op_str[env->cc_op]);
+ else
+ snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
+ fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
+ env->cc_src, env->cc_dst, cc_op_name);
+ }
+ if (flags & X86_DUMP_FPU) {
+ fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
+ (double)env->fpregs[0],
+ (double)env->fpregs[1],
+ (double)env->fpregs[2],
+ (double)env->fpregs[3]);
+ fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
+ (double)env->fpregs[4],
+ (double)env->fpregs[5],
+ (double)env->fpregs[7],
+ (double)env->fpregs[8]);
+ }
+}
+
+/***********************************************************/
+/* x86 mmu */
+/* XXX: add PGE support */
+
+/* called when cr3 or PG bit are modified */
+static int last_pg_state = -1;
+static int last_pe_state = 0;
+int phys_ram_size;
+int phys_ram_fd;
+uint8_t *phys_ram_base;
+
+void cpu_x86_update_cr0(CPUX86State *env)
+{
+ int pg_state, pe_state;
+
+#ifdef DEBUG_MMU
+ printf("CR0 update: CR0=0x%08x\n", env->cr[0]);
+#endif
+ pg_state = env->cr[0] & CR0_PG_MASK;
+ if (pg_state != last_pg_state) {
+ page_unmap();
+ tlb_flush(env);
+ last_pg_state = pg_state;
+ }
+ pe_state = env->cr[0] & CR0_PE_MASK;
+ if (last_pe_state != pe_state) {
+ tb_flush();
+ last_pe_state = pe_state;
+ }
+}
+
+void cpu_x86_update_cr3(CPUX86State *env)
+{
+ if (env->cr[0] & CR0_PG_MASK) {
+#if defined(DEBUG_MMU)
+ printf("CR3 update: CR3=%08x\n", env->cr[3]);
+#endif
+ page_unmap();
+ tlb_flush(env);
+ }
+}
+
+void cpu_x86_init_mmu(CPUX86State *env)
+{
+ last_pg_state = -1;
+ cpu_x86_update_cr0(env);
+}
+
+/* XXX: also flush 4MB pages */
+void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
+{
+ int flags;
+ unsigned long virt_addr;
+
+ tlb_flush_page(env, addr);
+
+ flags = page_get_flags(addr);
+ if (flags & PAGE_VALID) {
+ virt_addr = addr & ~0xfff;
+ munmap((void *)virt_addr, 4096);
+ page_set_flags(virt_addr, virt_addr + 4096, 0);
+ }
+}
+
+/* return value:
+ -1 = cannot handle fault
+ 0 = nothing more to do
+ 1 = generate PF fault
+ 2 = soft MMU activation required for this block
+*/
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
+{
+ uint8_t *pde_ptr, *pte_ptr;
+ uint32_t pde, pte, virt_addr;
+ int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
+ unsigned long pd;
+
+ cpl = env->hflags & HF_CPL_MASK;
+ is_user = (cpl == 3);
+
+#ifdef DEBUG_MMU
+ printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
+ addr, is_write, is_user, env->eip);
+#endif
+
+ if (env->user_mode_only) {
+ /* user mode only emulation */
+ error_code = 0;
+ goto do_fault;
+ }
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ pte = addr;
+ virt_addr = addr & ~0xfff;
+ prot = PROT_READ | PROT_WRITE;
+ page_size = 4096;
+ goto do_mapping;
+ }
+
+ /* page directory entry */
+ pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
+ pde = ldl(pde_ptr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ error_code = 0;
+ goto do_fault;
+ }
+ if (is_user) {
+ if (!(pde & PG_USER_MASK))
+ goto do_fault_protect;
+ if (is_write && !(pde & PG_RW_MASK))
+ goto do_fault_protect;
+ } else {
+ if ((env->cr[0] & CR0_WP_MASK) && (pde & PG_USER_MASK) &&
+ is_write && !(pde & PG_RW_MASK))
+ goto do_fault_protect;
+ }
+ /* if PSE bit is set, then we use a 4MB page */
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ is_dirty = is_write && !(pde & PG_DIRTY_MASK);
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ if (is_dirty)
+ pde |= PG_DIRTY_MASK;
+ stl(pde_ptr, pde);
+ }
+
+ pte = pde & ~0x003ff000; /* align to 4MB */
+ page_size = 4096 * 1024;
+ virt_addr = addr & ~0x003fffff;
+ } else {
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ stl(pde_ptr, pde);
+ }
+
+ /* page table entry */
+ pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));
+ pte = ldl(pte_ptr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ error_code = 0;
+ goto do_fault;
+ }
+ if (is_user) {
+ if (!(pte & PG_USER_MASK))
+ goto do_fault_protect;
+ if (is_write && !(pte & PG_RW_MASK))
+ goto do_fault_protect;
+ } else {
+ if ((env->cr[0] & CR0_WP_MASK) && (pte & PG_USER_MASK) &&
+ is_write && !(pte & PG_RW_MASK))
+ goto do_fault_protect;
+ }
+ is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+ if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+ pte |= PG_ACCESSED_MASK;
+ if (is_dirty)
+ pte |= PG_DIRTY_MASK;
+ stl(pte_ptr, pte);
+ }
+ page_size = 4096;
+ virt_addr = addr & ~0xfff;
+ }
+ /* the page can be put in the TLB */
+ prot = PROT_READ;
+ if (is_user) {
+ if (pte & PG_RW_MASK)
+ prot |= PROT_WRITE;
+ } else {
+ if (!(env->cr[0] & CR0_WP_MASK) || !(pte & PG_USER_MASK) ||
+ (pte & PG_RW_MASK))
+ prot |= PROT_WRITE;
+ }
+
+ do_mapping:
+ if (env->hflags & HF_SOFTMMU_MASK) {
+ unsigned long paddr, vaddr, address, addend, page_offset;
+ int index;
+
+ /* software MMU case. Even for 4MB pages, we map only one 4KB
+ page in the cache to avoid filling it too quickly */
+ page_offset = (addr & ~0xfff) & (page_size - 1);
+ paddr = (pte & ~0xfff) + page_offset;
+ vaddr = virt_addr + page_offset;
+ index = (addr >> 12) & (CPU_TLB_SIZE - 1);
+ pd = physpage_find(paddr);
+ if (pd & 0xfff) {
+ /* IO memory case */
+ address = vaddr | pd;
+ addend = paddr;
+ } else {
+ /* standard memory */
+ address = vaddr;
+ addend = (unsigned long)phys_ram_base + pd;
+ }
+ addend -= vaddr;
+ env->tlb_read[is_user][index].address = address;
+ env->tlb_read[is_user][index].addend = addend;
+ if (prot & PROT_WRITE) {
+ env->tlb_write[is_user][index].address = address;
+ env->tlb_write[is_user][index].addend = addend;
+ }
+ }
+ ret = 0;
+ /* XXX: incorrect for 4MB pages */
+ pd = physpage_find(pte & ~0xfff);
+ if ((pd & 0xfff) != 0) {
+ /* IO access: no mapping is done as it will be handled by the
+ soft MMU */
+ if (!(env->hflags & HF_SOFTMMU_MASK))
+ ret = 2;
+ } else {
+ void *map_addr;
+ map_addr = mmap((void *)virt_addr, page_size, prot,
+ MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
+ if (map_addr == MAP_FAILED) {
+ fprintf(stderr,
+ "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
+ pte & ~0xfff, virt_addr);
+ exit(1);
+ }
+#ifdef DEBUG_MMU
+ printf("mmaping 0x%08x to virt 0x%08x pse=%d\n",
+ pte & ~0xfff, virt_addr, (page_size != 4096));
+#endif
+ page_set_flags(virt_addr, virt_addr + page_size,
+ PAGE_VALID | PAGE_EXEC | prot);
+ }
+ return ret;
+ do_fault_protect:
+ error_code = PG_ERROR_P_MASK;
+ do_fault:
+ env->cr[2] = addr;
+ env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
+ if (is_user)
+ env->error_code |= PG_ERROR_U_MASK;
+ return 1;
+}
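+
+/* A minimal sketch of how a caller may dispatch on the return codes
+ documented above (tlb_fill() shows the real handling of code 1; the
+ other branches here are illustrative only):
+
+ switch (cpu_x86_handle_mmu_fault(env, addr, is_write)) {
+ case 0: break; -- page mapped, the access can be retried
+ case 1: raise_exception_err(EXCP0E_PAGE, env->error_code); break;
+ case 2: ... -- activate the soft MMU for this block
+ default: ... -- -1: the fault cannot be handled
+ }
+*/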
--- /dev/null
+/*
+ * i386 micro operations
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include "exec.h"
+
+/* n must be a constant to be efficient */
+static inline int lshift(int x, int n)
+{
+ if (n >= 0)
+ return x << n;
+ else
+ return x >> (-n);
+}
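+
+/* For example, lshift(x, 3) reduces to 'x << 3' and lshift(x, -3) to
+ 'x >> 3'; with a constant n the compiler folds the branch away,
+ which is why n must be a constant for this to be efficient. */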
+
+/* we define the various pieces of code used by the JIT */
+
+#define REG EAX
+#define REGNAME _EAX
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
+
+#define REG ECX
+#define REGNAME _ECX
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
+
+#define REG EDX
+#define REGNAME _EDX
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
+
+#define REG EBX
+#define REGNAME _EBX
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
+
+#define REG ESP
+#define REGNAME _ESP
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
+
+#define REG EBP
+#define REGNAME _EBP
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
+
+#define REG ESI
+#define REGNAME _ESI
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
+
+#define REG EDI
+#define REGNAME _EDI
+#include "opreg_template.h"
+#undef REG
+#undef REGNAME
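+
+/* Each inclusion of opreg_template.h above stamps out the same set of
+ micro operations for one register; with REG/REGNAME set to
+ EAX/_EAX, for instance, glue(op_movl_T0,REGNAME) expands to:
+
+ void OPPROTO op_movl_T0_EAX(void)
+ {
+ T0 = EAX;
+ }
+*/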
+
+/* operations with flags */
+
+/* update flags with T0 and T1 (add/sub case) */
+void OPPROTO op_update2_cc(void)
+{
+ CC_SRC = T1;
+ CC_DST = T0;
+}
+
+/* update flags with T0 (logic operation case) */
+void OPPROTO op_update1_cc(void)
+{
+ CC_DST = T0;
+}
+
+void OPPROTO op_update_neg_cc(void)
+{
+ CC_SRC = -T0;
+ CC_DST = T0;
+}
+
+void OPPROTO op_cmpl_T0_T1_cc(void)
+{
+ CC_SRC = T1;
+ CC_DST = T0 - T1;
+}
+
+void OPPROTO op_update_inc_cc(void)
+{
+ CC_SRC = cc_table[CC_OP].compute_c();
+ CC_DST = T0;
+}
+
+void OPPROTO op_testl_T0_T1_cc(void)
+{
+ CC_DST = T0 & T1;
+}
+
+/* operations without flags */
+
+void OPPROTO op_addl_T0_T1(void)
+{
+ T0 += T1;
+}
+
+void OPPROTO op_orl_T0_T1(void)
+{
+ T0 |= T1;
+}
+
+void OPPROTO op_andl_T0_T1(void)
+{
+ T0 &= T1;
+}
+
+void OPPROTO op_subl_T0_T1(void)
+{
+ T0 -= T1;
+}
+
+void OPPROTO op_xorl_T0_T1(void)
+{
+ T0 ^= T1;
+}
+
+void OPPROTO op_negl_T0(void)
+{
+ T0 = -T0;
+}
+
+void OPPROTO op_incl_T0(void)
+{
+ T0++;
+}
+
+void OPPROTO op_decl_T0(void)
+{
+ T0--;
+}
+
+void OPPROTO op_notl_T0(void)
+{
+ T0 = ~T0;
+}
+
+void OPPROTO op_bswapl_T0(void)
+{
+ T0 = bswap32(T0);
+}
+
+/* multiply/divide */
+void OPPROTO op_mulb_AL_T0(void)
+{
+ unsigned int res;
+ res = (uint8_t)EAX * (uint8_t)T0;
+ EAX = (EAX & 0xffff0000) | res;
+ CC_SRC = (res & 0xff00);
+}
+
+void OPPROTO op_imulb_AL_T0(void)
+{
+ int res;
+ res = (int8_t)EAX * (int8_t)T0;
+ EAX = (EAX & 0xffff0000) | (res & 0xffff);
+ CC_SRC = (res != (int8_t)res);
+}
+
+void OPPROTO op_mulw_AX_T0(void)
+{
+ unsigned int res;
+ res = (uint16_t)EAX * (uint16_t)T0;
+ EAX = (EAX & 0xffff0000) | (res & 0xffff);
+ EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff);
+ CC_SRC = res >> 16;
+}
+
+void OPPROTO op_imulw_AX_T0(void)
+{
+ int res;
+ res = (int16_t)EAX * (int16_t)T0;
+ EAX = (EAX & 0xffff0000) | (res & 0xffff);
+ EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff);
+ CC_SRC = (res != (int16_t)res);
+}
+
+void OPPROTO op_mull_EAX_T0(void)
+{
+ uint64_t res;
+ res = (uint64_t)((uint32_t)EAX) * (uint64_t)((uint32_t)T0);
+ EAX = res;
+ EDX = res >> 32;
+ CC_SRC = res >> 32;
+}
+
+void OPPROTO op_imull_EAX_T0(void)
+{
+ int64_t res;
+ res = (int64_t)((int32_t)EAX) * (int64_t)((int32_t)T0);
+ EAX = res;
+ EDX = res >> 32;
+ CC_SRC = (res != (int32_t)res);
+}
+
+void OPPROTO op_imulw_T0_T1(void)
+{
+ int res;
+ res = (int16_t)T0 * (int16_t)T1;
+ T0 = res;
+ CC_SRC = (res != (int16_t)res);
+}
+
+void OPPROTO op_imull_T0_T1(void)
+{
+ int64_t res;
+ res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
+ T0 = res;
+ CC_SRC = (res != (int32_t)res);
+}
+
+/* division, flags are undefined */
+/* XXX: add exceptions for overflow */
+
+void OPPROTO op_divb_AL_T0(void)
+{
+ unsigned int num, den, q, r;
+
+ num = (EAX & 0xffff);
+ den = (T0 & 0xff);
+ if (den == 0) {
+ EIP = PARAM1;
+ raise_exception(EXCP00_DIVZ);
+ }
+ q = (num / den) & 0xff;
+ r = (num % den) & 0xff;
+ EAX = (EAX & 0xffff0000) | (r << 8) | q;
+}
+
+void OPPROTO op_idivb_AL_T0(void)
+{
+ int num, den, q, r;
+
+ num = (int16_t)EAX;
+ den = (int8_t)T0;
+ if (den == 0) {
+ EIP = PARAM1;
+ raise_exception(EXCP00_DIVZ);
+ }
+ q = (num / den) & 0xff;
+ r = (num % den) & 0xff;
+ EAX = (EAX & 0xffff0000) | (r << 8) | q;
+}
+
+void OPPROTO op_divw_AX_T0(void)
+{
+ unsigned int num, den, q, r;
+
+ num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
+ den = (T0 & 0xffff);
+ if (den == 0) {
+ EIP = PARAM1;
+ raise_exception(EXCP00_DIVZ);
+ }
+ q = (num / den) & 0xffff;
+ r = (num % den) & 0xffff;
+ EAX = (EAX & 0xffff0000) | q;
+ EDX = (EDX & 0xffff0000) | r;
+}
+
+void OPPROTO op_idivw_AX_T0(void)
+{
+ int num, den, q, r;
+
+ num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
+ den = (int16_t)T0;
+ if (den == 0) {
+ EIP = PARAM1;
+ raise_exception(EXCP00_DIVZ);
+ }
+ q = (num / den) & 0xffff;
+ r = (num % den) & 0xffff;
+ EAX = (EAX & 0xffff0000) | q;
+ EDX = (EDX & 0xffff0000) | r;
+}
+
+void OPPROTO op_divl_EAX_T0(void)
+{
+ helper_divl_EAX_T0(PARAM1);
+}
+
+void OPPROTO op_idivl_EAX_T0(void)
+{
+ helper_idivl_EAX_T0(PARAM1);
+}
+
+/* constant load & misc op */
+
+void OPPROTO op_movl_T0_im(void)
+{
+ T0 = PARAM1;
+}
+
+void OPPROTO op_addl_T0_im(void)
+{
+ T0 += PARAM1;
+}
+
+void OPPROTO op_andl_T0_ffff(void)
+{
+ T0 = T0 & 0xffff;
+}
+
+void OPPROTO op_andl_T0_im(void)
+{
+ T0 = T0 & PARAM1;
+}
+
+void OPPROTO op_movl_T0_T1(void)
+{
+ T0 = T1;
+}
+
+void OPPROTO op_movl_T1_im(void)
+{
+ T1 = PARAM1;
+}
+
+void OPPROTO op_addl_T1_im(void)
+{
+ T1 += PARAM1;
+}
+
+void OPPROTO op_movl_T1_A0(void)
+{
+ T1 = A0;
+}
+
+void OPPROTO op_movl_A0_im(void)
+{
+ A0 = PARAM1;
+}
+
+void OPPROTO op_addl_A0_im(void)
+{
+ A0 += PARAM1;
+}
+
+void OPPROTO op_addl_A0_AL(void)
+{
+ A0 += (EAX & 0xff);
+}
+
+void OPPROTO op_andl_A0_ffff(void)
+{
+ A0 = A0 & 0xffff;
+}
+
+/* memory access */
+
+#define MEMSUFFIX
+#include "ops_mem.h"
+
+#define MEMSUFFIX _user
+#include "ops_mem.h"
+
+#define MEMSUFFIX _kernel
+#include "ops_mem.h"
+
+/* used for bit operations */
+
+void OPPROTO op_add_bitw_A0_T1(void)
+{
+ A0 += ((int32_t)T1 >> 4) << 1;
+}
+
+void OPPROTO op_add_bitl_A0_T1(void)
+{
+ A0 += ((int32_t)T1 >> 5) << 2;
+}
+
+/* indirect jump */
+
+void OPPROTO op_jmp_T0(void)
+{
+ EIP = T0;
+}
+
+void OPPROTO op_jmp_im(void)
+{
+ EIP = PARAM1;
+}
+
+void OPPROTO op_hlt(void)
+{
+ env->exception_index = EXCP_HLT;
+ cpu_loop_exit();
+}
+
+void OPPROTO op_debug(void)
+{
+ env->exception_index = EXCP_DEBUG;
+ cpu_loop_exit();
+}
+
+void OPPROTO op_raise_interrupt(void)
+{
+ int intno;
+ unsigned int next_eip;
+ intno = PARAM1;
+ next_eip = PARAM2;
+ raise_interrupt(intno, 1, 0, next_eip);
+}
+
+void OPPROTO op_raise_exception(void)
+{
+ int exception_index;
+ exception_index = PARAM1;
+ raise_exception(exception_index);
+}
+
+void OPPROTO op_into(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ if (eflags & CC_O) {
+ raise_interrupt(EXCP04_INTO, 1, 0, PARAM1);
+ }
+ FORCE_RET();
+}
+
+void OPPROTO op_cli(void)
+{
+ env->eflags &= ~IF_MASK;
+}
+
+void OPPROTO op_sti(void)
+{
+ env->eflags |= IF_MASK;
+}
+
+void OPPROTO op_set_inhibit_irq(void)
+{
+ env->hflags |= HF_INHIBIT_IRQ_MASK;
+}
+
+void OPPROTO op_reset_inhibit_irq(void)
+{
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+}
+
+#if 0
+/* vm86plus instructions */
+void OPPROTO op_cli_vm(void)
+{
+ env->eflags &= ~VIF_MASK;
+}
+
+void OPPROTO op_sti_vm(void)
+{
+ env->eflags |= VIF_MASK;
+ if (env->eflags & VIP_MASK) {
+ EIP = PARAM1;
+ raise_exception(EXCP0D_GPF);
+ }
+ FORCE_RET();
+}
+#endif
+
+void OPPROTO op_boundw(void)
+{
+ int low, high, v;
+ low = ldsw((uint8_t *)A0);
+ high = ldsw((uint8_t *)A0 + 2);
+ v = (int16_t)T0;
+ if (v < low || v > high) {
+ EIP = PARAM1;
+ raise_exception(EXCP05_BOUND);
+ }
+ FORCE_RET();
+}
+
+void OPPROTO op_boundl(void)
+{
+ int low, high, v;
+ low = ldl((uint8_t *)A0);
+ high = ldl((uint8_t *)A0 + 4);
+ v = T0;
+ if (v < low || v > high) {
+ EIP = PARAM1;
+ raise_exception(EXCP05_BOUND);
+ }
+ FORCE_RET();
+}
+
+void OPPROTO op_cmpxchg8b(void)
+{
+ helper_cmpxchg8b();
+}
+
+void OPPROTO op_jmp(void)
+{
+ JUMP_TB(op_jmp, PARAM1, 0, PARAM2);
+}
+
+void OPPROTO op_movl_T0_0(void)
+{
+ T0 = 0;
+}
+
+void OPPROTO op_exit_tb(void)
+{
+ EXIT_TB();
+}
+
+/* multiple size ops */
+
+#define ldul ldl
+
+#define SHIFT 0
+#include "ops_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "ops_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "ops_template.h"
+#undef SHIFT
+
+/* sign extend */
+
+void OPPROTO op_movsbl_T0_T0(void)
+{
+ T0 = (int8_t)T0;
+}
+
+void OPPROTO op_movzbl_T0_T0(void)
+{
+ T0 = (uint8_t)T0;
+}
+
+void OPPROTO op_movswl_T0_T0(void)
+{
+ T0 = (int16_t)T0;
+}
+
+void OPPROTO op_movzwl_T0_T0(void)
+{
+ T0 = (uint16_t)T0;
+}
+
+void OPPROTO op_movswl_EAX_AX(void)
+{
+ EAX = (int16_t)EAX;
+}
+
+void OPPROTO op_movsbw_AX_AL(void)
+{
+ EAX = (EAX & 0xffff0000) | ((int8_t)EAX & 0xffff);
+}
+
+void OPPROTO op_movslq_EDX_EAX(void)
+{
+ EDX = (int32_t)EAX >> 31;
+}
+
+void OPPROTO op_movswl_DX_AX(void)
+{
+ EDX = (EDX & 0xffff0000) | (((int16_t)EAX >> 15) & 0xffff);
+}
+
+/* string ops helpers */
+
+void OPPROTO op_addl_ESI_T0(void)
+{
+ ESI += T0;
+}
+
+void OPPROTO op_addw_ESI_T0(void)
+{
+ ESI = (ESI & ~0xffff) | ((ESI + T0) & 0xffff);
+}
+
+void OPPROTO op_addl_EDI_T0(void)
+{
+ EDI += T0;
+}
+
+void OPPROTO op_addw_EDI_T0(void)
+{
+ EDI = (EDI & ~0xffff) | ((EDI + T0) & 0xffff);
+}
+
+void OPPROTO op_decl_ECX(void)
+{
+ ECX--;
+}
+
+void OPPROTO op_decw_ECX(void)
+{
+ ECX = (ECX & ~0xffff) | ((ECX - 1) & 0xffff);
+}
+
+/* push/pop */
+
+void op_pushl_T0(void)
+{
+ uint32_t offset;
+ offset = ESP - 4;
+ stl((void *)offset, T0);
+ /* modify ESP after to handle exceptions correctly */
+ ESP = offset;
+}
+
+void op_pushw_T0(void)
+{
+ uint32_t offset;
+ offset = ESP - 2;
+ stw((void *)offset, T0);
+ /* modify ESP after to handle exceptions correctly */
+ ESP = offset;
+}
+
+void op_pushl_ss32_T0(void)
+{
+ uint32_t offset;
+ offset = ESP - 4;
+ stl(env->segs[R_SS].base + offset, T0);
+ /* modify ESP after to handle exceptions correctly */
+ ESP = offset;
+}
+
+void op_pushw_ss32_T0(void)
+{
+ uint32_t offset;
+ offset = ESP - 2;
+ stw(env->segs[R_SS].base + offset, T0);
+ /* modify ESP after to handle exceptions correctly */
+ ESP = offset;
+}
+
+void op_pushl_ss16_T0(void)
+{
+ uint32_t offset;
+ offset = (ESP - 4) & 0xffff;
+ stl(env->segs[R_SS].base + offset, T0);
+ /* modify ESP after to handle exceptions correctly */
+ ESP = (ESP & ~0xffff) | offset;
+}
+
+void op_pushw_ss16_T0(void)
+{
+ uint32_t offset;
+ offset = (ESP - 2) & 0xffff;
+ stw(env->segs[R_SS].base + offset, T0);
+ /* modify ESP after to handle exceptions correctly */
+ ESP = (ESP & ~0xffff) | offset;
+}
+
+/* NOTE: ESP update is done after */
+void op_popl_T0(void)
+{
+ T0 = ldl((void *)ESP);
+}
+
+void op_popw_T0(void)
+{
+ T0 = lduw((void *)ESP);
+}
+
+void op_popl_ss32_T0(void)
+{
+ T0 = ldl(env->segs[R_SS].base + ESP);
+}
+
+void op_popw_ss32_T0(void)
+{
+ T0 = lduw(env->segs[R_SS].base + ESP);
+}
+
+void op_popl_ss16_T0(void)
+{
+ T0 = ldl(env->segs[R_SS].base + (ESP & 0xffff));
+}
+
+void op_popw_ss16_T0(void)
+{
+ T0 = lduw(env->segs[R_SS].base + (ESP & 0xffff));
+}
+
+void op_addl_ESP_4(void)
+{
+ ESP += 4;
+}
+
+void op_addl_ESP_2(void)
+{
+ ESP += 2;
+}
+
+void op_addw_ESP_4(void)
+{
+ ESP = (ESP & ~0xffff) | ((ESP + 4) & 0xffff);
+}
+
+void op_addw_ESP_2(void)
+{
+ ESP = (ESP & ~0xffff) | ((ESP + 2) & 0xffff);
+}
+
+void op_addl_ESP_im(void)
+{
+ ESP += PARAM1;
+}
+
+void op_addw_ESP_im(void)
+{
+ ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff);
+}
+
+void OPPROTO op_rdtsc(void)
+{
+ helper_rdtsc();
+}
+
+void OPPROTO op_cpuid(void)
+{
+ helper_cpuid();
+}
+
+void OPPROTO op_rdmsr(void)
+{
+ helper_rdmsr();
+}
+
+void OPPROTO op_wrmsr(void)
+{
+ helper_wrmsr();
+}
+
+/* bcd */
+
+/* XXX: exception */
+void OPPROTO op_aam(void)
+{
+ int base = PARAM1;
+ int al, ah;
+ al = EAX & 0xff;
+ ah = al / base;
+ al = al % base;
+ EAX = (EAX & ~0xffff) | al | (ah << 8);
+ CC_DST = al;
+}
+
+void OPPROTO op_aad(void)
+{
+ int base = PARAM1;
+ int al, ah;
+ al = EAX & 0xff;
+ ah = (EAX >> 8) & 0xff;
+ al = ((ah * base) + al) & 0xff;
+ EAX = (EAX & ~0xffff) | al;
+ CC_DST = al;
+}
+
+void OPPROTO op_aaa(void)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = cc_table[CC_OP].compute_all();
+ af = eflags & CC_A;
+ al = EAX & 0xff;
+ ah = (EAX >> 8) & 0xff;
+
+ icarry = (al > 0xf9);
+ if (((al & 0x0f) > 9 ) || af) {
+ al = (al + 6) & 0x0f;
+ ah = (ah + 1 + icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ EAX = (EAX & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
+void OPPROTO op_aas(void)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = cc_table[CC_OP].compute_all();
+ af = eflags & CC_A;
+ al = EAX & 0xff;
+ ah = (EAX >> 8) & 0xff;
+
+ icarry = (al < 6);
+ if (((al & 0x0f) > 9 ) || af) {
+ al = (al - 6) & 0x0f;
+ ah = (ah - 1 - icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ EAX = (EAX & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
+void OPPROTO op_daa(void)
+{
+ int al, af, cf;
+ int eflags;
+
+ eflags = cc_table[CC_OP].compute_all();
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ al = EAX & 0xff;
+
+ eflags = 0;
+ if (((al & 0x0f) > 9 ) || af) {
+ al = (al + 6) & 0xff;
+ eflags |= CC_A;
+ }
+ if ((al > 0x9f) || cf) {
+ al = (al + 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ EAX = (EAX & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+void OPPROTO op_das(void)
+{
+ int al, al1, af, cf;
+ int eflags;
+
+ eflags = cc_table[CC_OP].compute_all();
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ al = EAX & 0xff;
+
+ eflags = 0;
+ al1 = al;
+ if (((al & 0x0f) > 9 ) || af) {
+ eflags |= CC_A;
+ if (al < 6 || cf)
+ eflags |= CC_C;
+ al = (al - 6) & 0xff;
+ }
+ if ((al1 > 0x99) || cf) {
+ al = (al - 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ EAX = (EAX & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+/* segment handling */
+
+/* never use it with R_CS */
+void OPPROTO op_movl_seg_T0(void)
+{
+ load_seg(PARAM1, T0 & 0xffff, PARAM2);
+}
+
+/* faster VM86 version */
+void OPPROTO op_movl_seg_T0_vm(void)
+{
+ int selector;
+ SegmentCache *sc;
+
+ selector = T0 & 0xffff;
+ /* env->segs[] access */
+ sc = (SegmentCache *)((char *)env + PARAM1);
+ sc->selector = selector;
+ sc->base = (void *)(selector << 4);
+}
+
+void OPPROTO op_movl_T0_seg(void)
+{
+ T0 = env->segs[PARAM1].selector;
+}
+
+void OPPROTO op_movl_A0_seg(void)
+{
+ A0 = *(unsigned long *)((char *)env + PARAM1);
+}
+
+void OPPROTO op_addl_A0_seg(void)
+{
+ A0 += *(unsigned long *)((char *)env + PARAM1);
+}
+
+void OPPROTO op_lsl(void)
+{
+ helper_lsl();
+}
+
+void OPPROTO op_lar(void)
+{
+ helper_lar();
+}
+
+/* T0: segment, T1:eip */
+void OPPROTO op_ljmp_protected_T0_T1(void)
+{
+ helper_ljmp_protected_T0_T1();
+}
+
+void OPPROTO op_lcall_real_T0_T1(void)
+{
+ helper_lcall_real_T0_T1(PARAM1, PARAM2);
+}
+
+void OPPROTO op_lcall_protected_T0_T1(void)
+{
+ helper_lcall_protected_T0_T1(PARAM1, PARAM2);
+}
+
+void OPPROTO op_iret_real(void)
+{
+ helper_iret_real(PARAM1);
+}
+
+void OPPROTO op_iret_protected(void)
+{
+ helper_iret_protected(PARAM1);
+}
+
+void OPPROTO op_lret_protected(void)
+{
+ helper_lret_protected(PARAM1, PARAM2);
+}
+
+void OPPROTO op_lldt_T0(void)
+{
+ helper_lldt_T0();
+}
+
+void OPPROTO op_ltr_T0(void)
+{
+ helper_ltr_T0();
+}
+
+/* CR registers access */
+void OPPROTO op_movl_crN_T0(void)
+{
+ helper_movl_crN_T0(PARAM1);
+}
+
+/* DR registers access */
+void OPPROTO op_movl_drN_T0(void)
+{
+ helper_movl_drN_T0(PARAM1);
+}
+
+void OPPROTO op_lmsw_T0(void)
+{
+ /* only 4 lower bits of CR0 are modified */
+ T0 = (env->cr[0] & ~0xf) | (T0 & 0xf);
+ helper_movl_crN_T0(0);
+}
+
+void OPPROTO op_invlpg_A0(void)
+{
+ helper_invlpg(A0);
+}
+
+void OPPROTO op_movl_T0_env(void)
+{
+ T0 = *(uint32_t *)((char *)env + PARAM1);
+}
+
+void OPPROTO op_movl_env_T0(void)
+{
+ *(uint32_t *)((char *)env + PARAM1) = T0;
+}
+
+void OPPROTO op_movl_env_T1(void)
+{
+ *(uint32_t *)((char *)env + PARAM1) = T1;
+}
+
+void OPPROTO op_clts(void)
+{
+ env->cr[0] &= ~CR0_TS_MASK;
+}
+
+/* flags handling */
+
+/* slow jump cases: in order to avoid calling a function through a
+ pointer (which can generate a stack frame on PowerPC), we use
+ op_setcc to set T0 and then call op_jcc. */
+void OPPROTO op_jcc(void)
+{
+ if (T0)
+ JUMP_TB(op_jcc, PARAM1, 0, PARAM2);
+ else
+ JUMP_TB(op_jcc, PARAM1, 1, PARAM3);
+ FORCE_RET();
+}
+
+void OPPROTO op_jcc_im(void)
+{
+ if (T0)
+ EIP = PARAM1;
+ else
+ EIP = PARAM2;
+ FORCE_RET();
+}
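+
+/* A conditional branch such as 'jz' is therefore translated into two
+ micro operations, e.g.:
+
+ op_setz_T0_cc -- T0 = ZF, computed from the lazy flags
+ op_jcc -- branch on T0
+*/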
+
+/* slow set cases (compute x86 flags) */
+void OPPROTO op_seto_T0_cc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ T0 = (eflags >> 11) & 1;
+}
+
+void OPPROTO op_setb_T0_cc(void)
+{
+ T0 = cc_table[CC_OP].compute_c();
+}
+
+void OPPROTO op_setz_T0_cc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ T0 = (eflags >> 6) & 1;
+}
+
+void OPPROTO op_setbe_T0_cc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ T0 = (eflags & (CC_Z | CC_C)) != 0;
+}
+
+void OPPROTO op_sets_T0_cc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ T0 = (eflags >> 7) & 1;
+}
+
+void OPPROTO op_setp_T0_cc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ T0 = (eflags >> 2) & 1;
+}
+
+void OPPROTO op_setl_T0_cc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ T0 = ((eflags ^ (eflags >> 4)) >> 7) & 1;
+}
+
+void OPPROTO op_setle_T0_cc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ T0 = (((eflags ^ (eflags >> 4)) & 0x80) || (eflags & CC_Z)) != 0;
+}
+
+void OPPROTO op_xor_T0_1(void)
+{
+ T0 ^= 1;
+}
+
+void OPPROTO op_set_cc_op(void)
+{
+ CC_OP = PARAM1;
+}
+
+#define FL_UPDATE_MASK16 (FL_UPDATE_MASK32 & 0xffff)
+
+void OPPROTO op_movl_eflags_T0(void)
+{
+ int eflags;
+ eflags = T0;
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ DF = 1 - (2 * ((eflags >> 10) & 1));
+ /* we also update some system flags as in user mode */
+ env->eflags = (env->eflags & ~FL_UPDATE_MASK32) |
+ (eflags & FL_UPDATE_MASK32);
+}
+
+void OPPROTO op_movw_eflags_T0(void)
+{
+ int eflags;
+ eflags = T0;
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ DF = 1 - (2 * ((eflags >> 10) & 1));
+ /* we also update some system flags as in user mode */
+ env->eflags = (env->eflags & ~FL_UPDATE_MASK16) |
+ (eflags & FL_UPDATE_MASK16);
+}
+
+void OPPROTO op_movl_eflags_T0_cpl0(void)
+{
+ load_eflags(T0, FL_UPDATE_CPL0_MASK);
+}
+
+void OPPROTO op_movw_eflags_T0_cpl0(void)
+{
+ load_eflags(T0, FL_UPDATE_CPL0_MASK & 0xffff);
+}
+
+#if 0
+/* vm86plus version */
+void OPPROTO op_movw_eflags_T0_vm(void)
+{
+ int eflags;
+ eflags = T0;
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ DF = 1 - (2 * ((eflags >> 10) & 1));
+ /* we also update some system flags as in user mode */
+ env->eflags = (env->eflags & ~(FL_UPDATE_MASK16 | VIF_MASK)) |
+ (eflags & FL_UPDATE_MASK16);
+ if (eflags & IF_MASK) {
+ env->eflags |= VIF_MASK;
+ if (env->eflags & VIP_MASK) {
+ EIP = PARAM1;
+ raise_exception(EXCP0D_GPF);
+ }
+ }
+ FORCE_RET();
+}
+
+void OPPROTO op_movl_eflags_T0_vm(void)
+{
+ int eflags;
+ eflags = T0;
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ DF = 1 - (2 * ((eflags >> 10) & 1));
+ /* we also update some system flags as in user mode */
+ env->eflags = (env->eflags & ~(FL_UPDATE_MASK32 | VIF_MASK)) |
+ (eflags & FL_UPDATE_MASK32);
+ if (eflags & IF_MASK) {
+ env->eflags |= VIF_MASK;
+ if (env->eflags & VIP_MASK) {
+ EIP = PARAM1;
+ raise_exception(EXCP0D_GPF);
+ }
+ }
+ FORCE_RET();
+}
+#endif
+
+/* XXX: compute only O flag */
+void OPPROTO op_movb_eflags_T0(void)
+{
+ int of;
+ of = cc_table[CC_OP].compute_all() & CC_O;
+ CC_SRC = (T0 & (CC_S | CC_Z | CC_A | CC_P | CC_C)) | of;
+}
+
+void OPPROTO op_movl_T0_eflags(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ eflags |= (DF & DF_MASK);
+ eflags |= env->eflags & ~(VM_MASK | RF_MASK);
+ T0 = eflags;
+}
+
+/* vm86plus version */
+#if 0
+void OPPROTO op_movl_T0_eflags_vm(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ eflags |= (DF & DF_MASK);
+ eflags |= env->eflags & ~(VM_MASK | RF_MASK | IF_MASK);
+ if (env->eflags & VIF_MASK)
+ eflags |= IF_MASK;
+ T0 = eflags;
+}
+#endif
+
+void OPPROTO op_cld(void)
+{
+ DF = 1;
+}
+
+void OPPROTO op_std(void)
+{
+ DF = -1;
+}
+
+void OPPROTO op_clc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ eflags &= ~CC_C;
+ CC_SRC = eflags;
+}
+
+void OPPROTO op_stc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ eflags |= CC_C;
+ CC_SRC = eflags;
+}
+
+void OPPROTO op_cmc(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ eflags ^= CC_C;
+ CC_SRC = eflags;
+}
+
+void OPPROTO op_salc(void)
+{
+ int cf;
+ cf = cc_table[CC_OP].compute_c();
+ EAX = (EAX & ~0xff) | ((-cf) & 0xff);
+}
+
+static int compute_all_eflags(void)
+{
+ return CC_SRC;
+}
+
+static int compute_c_eflags(void)
+{
+ return CC_SRC & CC_C;
+}
+
+static int compute_c_mul(void)
+{
+ int cf;
+ cf = (CC_SRC != 0);
+ return cf;
+}
+
+static int compute_all_mul(void)
+{
+ int cf, pf, af, zf, sf, of;
+ cf = (CC_SRC != 0);
+ pf = 0; /* undefined */
+ af = 0; /* undefined */
+ zf = 0; /* undefined */
+ sf = 0; /* undefined */
+ of = cf << 11;
+ return cf | pf | af | zf | sf | of;
+}
+
+CCTable cc_table[CC_OP_NB] = {
+ [CC_OP_DYNAMIC] = { /* should never happen */ },
+
+ [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
+
+ [CC_OP_MUL] = { compute_all_mul, compute_c_mul },
+
+ [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
+ [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
+ [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
+
+ [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
+ [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
+ [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
+
+ [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
+ [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
+ [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
+
+ [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
+ [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
+ [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
+
+ [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
+ [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
+ [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
+
+ [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
+ [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
+ [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
+
+ [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
+ [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
+ [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
+
+ [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
+ [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
+ [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
+
+ [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
+ [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
+ [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
+};
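+
+/* The table above implements the lazy condition code evaluation:
+ arithmetic micro operations only record their operands in
+ CC_SRC/CC_DST and set CC_OP, and the flags are materialized on
+ demand, as done throughout this file:
+
+ eflags = cc_table[CC_OP].compute_all(); -- all the flags
+ cf = cc_table[CC_OP].compute_c(); -- carry flag only
+*/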
+
+/* floating point support. Some of the code for complicated x87
+ functions comes from the LGPL'ed x86 emulator found in the Willows
+ TWIN windows emulator. */
+
+#if defined(__powerpc__)
+extern CPU86_LDouble copysign(CPU86_LDouble, CPU86_LDouble);
+
+/* correct (but slow) PowerPC rint() (glibc version is incorrect) */
+double qemu_rint(double x)
+{
+ double y = 4503599627370496.0;
+ if (fabs(x) >= y)
+ return x;
+ if (x < 0)
+ y = -y;
+ y = (x + y) - y;
+ if (y == 0.0)
+ y = copysign(y, x);
+ return y;
+}
+
+#define rint qemu_rint
+#endif
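+
+/* The trick above relies on the 52 bit double precision mantissa:
+ adding and then subtracting 2^52 forces the fractional bits to be
+ rounded away in the current rounding mode. For example, in the
+ default round-to-nearest-even mode, qemu_rint(2.5) computes
+ (2.5 + 2^52) - 2^52 = 2.0. */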
+
+/* fp load FT0 */
+
+void OPPROTO op_flds_FT0_A0(void)
+{
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i32 = ldl((void *)A0);
+ FT0 = FP_CONVERT.f;
+#else
+ FT0 = ldfl((void *)A0);
+#endif
+}
+
+void OPPROTO op_fldl_FT0_A0(void)
+{
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i64 = ldq((void *)A0);
+ FT0 = FP_CONVERT.d;
+#else
+ FT0 = ldfq((void *)A0);
+#endif
+}
+
+/* helpers are needed to avoid static constant reference. XXX: find a better way */
+#ifdef USE_INT_TO_FLOAT_HELPERS
+
+void helper_fild_FT0_A0(void)
+{
+ FT0 = (CPU86_LDouble)ldsw((void *)A0);
+}
+
+void helper_fildl_FT0_A0(void)
+{
+ FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
+}
+
+void helper_fildll_FT0_A0(void)
+{
+ FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
+}
+
+void OPPROTO op_fild_FT0_A0(void)
+{
+ helper_fild_FT0_A0();
+}
+
+void OPPROTO op_fildl_FT0_A0(void)
+{
+ helper_fildl_FT0_A0();
+}
+
+void OPPROTO op_fildll_FT0_A0(void)
+{
+ helper_fildll_FT0_A0();
+}
+
+#else
+
+void OPPROTO op_fild_FT0_A0(void)
+{
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i32 = ldsw((void *)A0);
+ FT0 = (CPU86_LDouble)FP_CONVERT.i32;
+#else
+ FT0 = (CPU86_LDouble)ldsw((void *)A0);
+#endif
+}
+
+void OPPROTO op_fildl_FT0_A0(void)
+{
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i32 = (int32_t) ldl((void *)A0);
+ FT0 = (CPU86_LDouble)FP_CONVERT.i32;
+#else
+ FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
+#endif
+}
+
+void OPPROTO op_fildll_FT0_A0(void)
+{
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i64 = (int64_t) ldq((void *)A0);
+ FT0 = (CPU86_LDouble)FP_CONVERT.i64;
+#else
+ FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
+#endif
+}
+#endif
+
+/* fp load ST0 */
+
+void OPPROTO op_flds_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i32 = ldl((void *)A0);
+ env->fpregs[new_fpstt] = FP_CONVERT.f;
+#else
+ env->fpregs[new_fpstt] = ldfl((void *)A0);
+#endif
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void OPPROTO op_fldl_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i64 = ldq((void *)A0);
+ env->fpregs[new_fpstt] = FP_CONVERT.d;
+#else
+ env->fpregs[new_fpstt] = ldfq((void *)A0);
+#endif
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+#ifdef USE_X86LDOUBLE
+void OPPROTO op_fldt_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt] = *(long double *)A0;
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+#else
+void OPPROTO op_fldt_ST0_A0(void)
+{
+ helper_fldt_ST0_A0();
+}
+#endif
+
+/* helpers are needed to avoid static constant reference. XXX: find a better way */
+#ifdef USE_INT_TO_FLOAT_HELPERS
+
+void helper_fild_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw((void *)A0);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildl_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl((void *)A0));
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildll_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq((void *)A0));
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void OPPROTO op_fild_ST0_A0(void)
+{
+ helper_fild_ST0_A0();
+}
+
+void OPPROTO op_fildl_ST0_A0(void)
+{
+ helper_fildl_ST0_A0();
+}
+
+void OPPROTO op_fildll_ST0_A0(void)
+{
+ helper_fildll_ST0_A0();
+}
+
+#else
+
+void OPPROTO op_fild_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i32 = ldsw((void *)A0);
+ env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32;
+#else
+ env->fpregs[new_fpstt] = (CPU86_LDouble)ldsw((void *)A0);
+#endif
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void OPPROTO op_fildl_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i32 = (int32_t) ldl((void *)A0);
+ env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i32;
+#else
+ env->fpregs[new_fpstt] = (CPU86_LDouble)((int32_t)ldl((void *)A0));
+#endif
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void OPPROTO op_fildll_ST0_A0(void)
+{
+ int new_fpstt;
+ new_fpstt = (env->fpstt - 1) & 7;
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.i64 = (int64_t) ldq((void *)A0);
+ env->fpregs[new_fpstt] = (CPU86_LDouble)FP_CONVERT.i64;
+#else
+ env->fpregs[new_fpstt] = (CPU86_LDouble)((int64_t)ldq((void *)A0));
+#endif
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+#endif
+
+/* fp store */
+
+void OPPROTO op_fsts_ST0_A0(void)
+{
+#ifdef USE_FP_CONVERT
+ FP_CONVERT.f = (float)ST0;
+ stfl((void *)A0, FP_CONVERT.f);
+#else
+ stfl((void *)A0, (float)ST0);
+#endif
+}
+
+void OPPROTO op_fstl_ST0_A0(void)
+{
+ stfq((void *)A0, (double)ST0);
+}
+
+#ifdef USE_X86LDOUBLE
+void OPPROTO op_fstt_ST0_A0(void)
+{
+ *(long double *)A0 = ST0;
+}
+#else
+void OPPROTO op_fstt_ST0_A0(void)
+{
+ helper_fstt_ST0_A0();
+}
+#endif
+
+void OPPROTO op_fist_ST0_A0(void)
+{
+#if defined(__sparc__) && !defined(__sparc_v9__)
+ register CPU86_LDouble d asm("o0");
+#else
+ CPU86_LDouble d;
+#endif
+ int val;
+
+ d = ST0;
+ val = lrint(d);
+ if (val != (int16_t)val)
+ val = -32768;
+ stw((void *)A0, val);
+}
+
+void OPPROTO op_fistl_ST0_A0(void)
+{
+#if defined(__sparc__) && !defined(__sparc_v9__)
+ register CPU86_LDouble d asm("o0");
+#else
+ CPU86_LDouble d;
+#endif
+ int val;
+
+ d = ST0;
+ val = lrint(d);
+ stl((void *)A0, val);
+}
+
+void OPPROTO op_fistll_ST0_A0(void)
+{
+#if defined(__sparc__) && !defined(__sparc_v9__)
+ register CPU86_LDouble d asm("o0");
+#else
+ CPU86_LDouble d;
+#endif
+ int64_t val;
+
+ d = ST0;
+ val = llrint(d);
+ stq((void *)A0, val);
+}
+
+void OPPROTO op_fbld_ST0_A0(void)
+{
+ helper_fbld_ST0_A0();
+}
+
+void OPPROTO op_fbst_ST0_A0(void)
+{
+ helper_fbst_ST0_A0();
+}
+
+/* FPU move */
+
+void OPPROTO op_fpush(void)
+{
+ fpush();
+}
+
+void OPPROTO op_fpop(void)
+{
+ fpop();
+}
+
+void OPPROTO op_fdecstp(void)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fpus &= (~0x4700);
+}
+
+void OPPROTO op_fincstp(void)
+{
+ env->fpstt = (env->fpstt + 1) & 7;
+ env->fpus &= (~0x4700);
+}
+
+void OPPROTO op_fmov_ST0_FT0(void)
+{
+ ST0 = FT0;
+}
+
+void OPPROTO op_fmov_FT0_STN(void)
+{
+ FT0 = ST(PARAM1);
+}
+
+void OPPROTO op_fmov_ST0_STN(void)
+{
+ ST0 = ST(PARAM1);
+}
+
+void OPPROTO op_fmov_STN_ST0(void)
+{
+ ST(PARAM1) = ST0;
+}
+
+void OPPROTO op_fxchg_ST0_STN(void)
+{
+ CPU86_LDouble tmp;
+ tmp = ST(PARAM1);
+ ST(PARAM1) = ST0;
+ ST0 = tmp;
+}
+
+/* FPU operations */
+
+/* XXX: handle nans */
+void OPPROTO op_fcom_ST0_FT0(void)
+{
+ env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */
+ if (ST0 < FT0)
+ env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */
+ else if (ST0 == FT0)
+ env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */
+ FORCE_RET();
+}
+
+/* XXX: handle nans */
+void OPPROTO op_fucom_ST0_FT0(void)
+{
+ env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */
+ if (ST0 < FT0)
+ env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */
+ else if (ST0 == FT0)
+ env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */
+ FORCE_RET();
+}
+
+/* XXX: handle nans */
+void OPPROTO op_fcomi_ST0_FT0(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ eflags &= ~(CC_Z | CC_P | CC_C);
+ if (ST0 < FT0)
+ eflags |= CC_C;
+ else if (ST0 == FT0)
+ eflags |= CC_Z;
+ CC_SRC = eflags;
+ FORCE_RET();
+}
+
+/* XXX: handle nans */
+void OPPROTO op_fucomi_ST0_FT0(void)
+{
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ eflags &= ~(CC_Z | CC_P | CC_C);
+ if (ST0 < FT0)
+ eflags |= CC_C;
+ else if (ST0 == FT0)
+ eflags |= CC_Z;
+ CC_SRC = eflags;
+ FORCE_RET();
+}
+
+void OPPROTO op_fadd_ST0_FT0(void)
+{
+ ST0 += FT0;
+}
+
+void OPPROTO op_fmul_ST0_FT0(void)
+{
+ ST0 *= FT0;
+}
+
+void OPPROTO op_fsub_ST0_FT0(void)
+{
+ ST0 -= FT0;
+}
+
+void OPPROTO op_fsubr_ST0_FT0(void)
+{
+ ST0 = FT0 - ST0;
+}
+
+void OPPROTO op_fdiv_ST0_FT0(void)
+{
+ ST0 /= FT0;
+}
+
+void OPPROTO op_fdivr_ST0_FT0(void)
+{
+ ST0 = FT0 / ST0;
+}
+
+/* fp operations between STN and ST0 */
+
+void OPPROTO op_fadd_STN_ST0(void)
+{
+ ST(PARAM1) += ST0;
+}
+
+void OPPROTO op_fmul_STN_ST0(void)
+{
+ ST(PARAM1) *= ST0;
+}
+
+void OPPROTO op_fsub_STN_ST0(void)
+{
+ ST(PARAM1) -= ST0;
+}
+
+void OPPROTO op_fsubr_STN_ST0(void)
+{
+ CPU86_LDouble *p;
+ p = &ST(PARAM1);
+ *p = ST0 - *p;
+}
+
+void OPPROTO op_fdiv_STN_ST0(void)
+{
+ ST(PARAM1) /= ST0;
+}
+
+void OPPROTO op_fdivr_STN_ST0(void)
+{
+ CPU86_LDouble *p;
+ p = &ST(PARAM1);
+ *p = ST0 / *p;
+}
+
+/* misc FPU operations */
+void OPPROTO op_fchs_ST0(void)
+{
+ ST0 = -ST0;
+}
+
+void OPPROTO op_fabs_ST0(void)
+{
+ ST0 = fabs(ST0);
+}
+
+void OPPROTO op_fxam_ST0(void)
+{
+ helper_fxam_ST0();
+}
+
+void OPPROTO op_fld1_ST0(void)
+{
+ ST0 = f15rk[1];
+}
+
+void OPPROTO op_fldl2t_ST0(void)
+{
+ ST0 = f15rk[6];
+}
+
+void OPPROTO op_fldl2e_ST0(void)
+{
+ ST0 = f15rk[5];
+}
+
+void OPPROTO op_fldpi_ST0(void)
+{
+ ST0 = f15rk[2];
+}
+
+void OPPROTO op_fldlg2_ST0(void)
+{
+ ST0 = f15rk[3];
+}
+
+void OPPROTO op_fldln2_ST0(void)
+{
+ ST0 = f15rk[4];
+}
+
+void OPPROTO op_fldz_ST0(void)
+{
+ ST0 = f15rk[0];
+}
+
+void OPPROTO op_fldz_FT0(void)
+{
+ ST0 = f15rk[0];
+}
+
+/* associated helpers to reduce generated code length and to simplify
+ relocation (FP constants are usually stored in the .rodata section) */
+
+void OPPROTO op_f2xm1(void)
+{
+ helper_f2xm1();
+}
+
+void OPPROTO op_fyl2x(void)
+{
+ helper_fyl2x();
+}
+
+void OPPROTO op_fptan(void)
+{
+ helper_fptan();
+}
+
+void OPPROTO op_fpatan(void)
+{
+ helper_fpatan();
+}
+
+void OPPROTO op_fxtract(void)
+{
+ helper_fxtract();
+}
+
+void OPPROTO op_fprem1(void)
+{
+ helper_fprem1();
+}
+
+void OPPROTO op_fprem(void)
+{
+ helper_fprem();
+}
+
+void OPPROTO op_fyl2xp1(void)
+{
+ helper_fyl2xp1();
+}
+
+void OPPROTO op_fsqrt(void)
+{
+ helper_fsqrt();
+}
+
+void OPPROTO op_fsincos(void)
+{
+ helper_fsincos();
+}
+
+void OPPROTO op_frndint(void)
+{
+ helper_frndint();
+}
+
+void OPPROTO op_fscale(void)
+{
+ helper_fscale();
+}
+
+void OPPROTO op_fsin(void)
+{
+ helper_fsin();
+}
+
+void OPPROTO op_fcos(void)
+{
+ helper_fcos();
+}
+
+void OPPROTO op_fnstsw_A0(void)
+{
+ int fpus;
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ stw((void *)A0, fpus);
+}
+
+void OPPROTO op_fnstsw_EAX(void)
+{
+ int fpus;
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ EAX = (EAX & 0xffff0000) | fpus;
+}
+
+void OPPROTO op_fnstcw_A0(void)
+{
+ stw((void *)A0, env->fpuc);
+}
+
+void OPPROTO op_fldcw_A0(void)
+{
+ int rnd_type;
+ env->fpuc = lduw((void *)A0);
+ /* set rounding mode */
+ switch(env->fpuc & RC_MASK) {
+ default:
+ case RC_NEAR:
+ rnd_type = FE_TONEAREST;
+ break;
+ case RC_DOWN:
+ rnd_type = FE_DOWNWARD;
+ break;
+ case RC_UP:
+ rnd_type = FE_UPWARD;
+ break;
+ case RC_CHOP:
+ rnd_type = FE_TOWARDZERO;
+ break;
+ }
+ fesetround(rnd_type);
+}
+
+void OPPROTO op_fclex(void)
+{
+ env->fpus &= 0x7f00;
+}
+
+void OPPROTO op_fninit(void)
+{
+ env->fpus = 0;
+ env->fpstt = 0;
+ env->fpuc = 0x37f;
+ env->fptags[0] = 1;
+ env->fptags[1] = 1;
+ env->fptags[2] = 1;
+ env->fptags[3] = 1;
+ env->fptags[4] = 1;
+ env->fptags[5] = 1;
+ env->fptags[6] = 1;
+ env->fptags[7] = 1;
+}
+
+void OPPROTO op_fnstenv_A0(void)
+{
+ helper_fstenv((uint8_t *)A0, PARAM1);
+}
+
+void OPPROTO op_fldenv_A0(void)
+{
+ helper_fldenv((uint8_t *)A0, PARAM1);
+}
+
+void OPPROTO op_fnsave_A0(void)
+{
+ helper_fsave((uint8_t *)A0, PARAM1);
+}
+
+void OPPROTO op_frstor_A0(void)
+{
+ helper_frstor((uint8_t *)A0, PARAM1);
+}
+
+/* threading support */
+void OPPROTO op_lock(void)
+{
+ cpu_lock();
+}
+
+void OPPROTO op_unlock(void)
+{
+ cpu_unlock();
+}
+
--- /dev/null
+/*
+ * i386 micro operations (templates for various register related
+ * operations)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+void OPPROTO glue(op_movl_A0,REGNAME)(void)
+{
+ A0 = REG;
+}
+
+void OPPROTO glue(op_addl_A0,REGNAME)(void)
+{
+ A0 += REG;
+}
+
+void OPPROTO glue(glue(op_addl_A0,REGNAME),_s1)(void)
+{
+ A0 += REG << 1;
+}
+
+void OPPROTO glue(glue(op_addl_A0,REGNAME),_s2)(void)
+{
+ A0 += REG << 2;
+}
+
+void OPPROTO glue(glue(op_addl_A0,REGNAME),_s3)(void)
+{
+ A0 += REG << 3;
+}
+
+void OPPROTO glue(op_movl_T0,REGNAME)(void)
+{
+ T0 = REG;
+}
+
+void OPPROTO glue(op_movl_T1,REGNAME)(void)
+{
+ T1 = REG;
+}
+
+void OPPROTO glue(op_movh_T0,REGNAME)(void)
+{
+ T0 = REG >> 8;
+}
+
+void OPPROTO glue(op_movh_T1,REGNAME)(void)
+{
+ T1 = REG >> 8;
+}
+
+void OPPROTO glue(glue(op_movl,REGNAME),_T0)(void)
+{
+ REG = T0;
+}
+
+void OPPROTO glue(glue(op_movl,REGNAME),_T1)(void)
+{
+ REG = T1;
+}
+
+void OPPROTO glue(glue(op_movl,REGNAME),_A0)(void)
+{
+ REG = A0;
+}
+
+/* mov T1 to REG if T0 is true */
+void OPPROTO glue(glue(op_cmovw,REGNAME),_T1_T0)(void)
+{
+ if (T0)
+ REG = (REG & 0xffff0000) | (T1 & 0xffff);
+}
+
+void OPPROTO glue(glue(op_cmovl,REGNAME),_T1_T0)(void)
+{
+ if (T0)
+ REG = T1;
+}
+
+/* NOTE: T0 high order bits are ignored */
+void OPPROTO glue(glue(op_movw,REGNAME),_T0)(void)
+{
+ REG = (REG & 0xffff0000) | (T0 & 0xffff);
+}
+
+/* NOTE: T0 high order bits are ignored */
+void OPPROTO glue(glue(op_movw,REGNAME),_T1)(void)
+{
+ REG = (REG & 0xffff0000) | (T1 & 0xffff);
+}
+
+/* NOTE: A0 high order bits are ignored */
+void OPPROTO glue(glue(op_movw,REGNAME),_A0)(void)
+{
+ REG = (REG & 0xffff0000) | (A0 & 0xffff);
+}
+
+/* NOTE: T0 high order bits are ignored */
+void OPPROTO glue(glue(op_movb,REGNAME),_T0)(void)
+{
+ REG = (REG & 0xffffff00) | (T0 & 0xff);
+}
+
+/* NOTE: T0 high order bits are ignored */
+void OPPROTO glue(glue(op_movh,REGNAME),_T0)(void)
+{
+ REG = (REG & 0xffff00ff) | ((T0 & 0xff) << 8);
+}
+
+/* NOTE: T1 high order bits are ignored */
+void OPPROTO glue(glue(op_movb,REGNAME),_T1)(void)
+{
+ REG = (REG & 0xffffff00) | (T1 & 0xff);
+}
+
+/* NOTE: T1 high order bits are ignored */
+void OPPROTO glue(glue(op_movh,REGNAME),_T1)(void)
+{
+ REG = (REG & 0xffff00ff) | ((T1 & 0xff) << 8);
+}
--- /dev/null
+void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T0_A0)(void)
+{
+ T0 = glue(ldub, MEMSUFFIX)((uint8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T0_A0)(void)
+{
+ T0 = glue(ldsb, MEMSUFFIX)((int8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T0_A0)(void)
+{
+ T0 = glue(lduw, MEMSUFFIX)((uint8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T0_A0)(void)
+{
+ T0 = glue(ldsw, MEMSUFFIX)((int8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T0_A0)(void)
+{
+ T0 = glue(ldl, MEMSUFFIX)((uint8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_ldub, MEMSUFFIX), _T1_A0)(void)
+{
+ T1 = glue(ldub, MEMSUFFIX)((uint8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_ldsb, MEMSUFFIX), _T1_A0)(void)
+{
+ T1 = glue(ldsb, MEMSUFFIX)((int8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_lduw, MEMSUFFIX), _T1_A0)(void)
+{
+ T1 = glue(lduw, MEMSUFFIX)((uint8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_ldsw, MEMSUFFIX), _T1_A0)(void)
+{
+ T1 = glue(ldsw, MEMSUFFIX)((int8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_ldl, MEMSUFFIX), _T1_A0)(void)
+{
+ T1 = glue(ldl, MEMSUFFIX)((uint8_t *)A0);
+}
+
+void OPPROTO glue(glue(op_stb, MEMSUFFIX), _T0_A0)(void)
+{
+ glue(stb, MEMSUFFIX)((uint8_t *)A0, T0);
+}
+
+void OPPROTO glue(glue(op_stw, MEMSUFFIX), _T0_A0)(void)
+{
+ glue(stw, MEMSUFFIX)((uint8_t *)A0, T0);
+}
+
+void OPPROTO glue(glue(op_stl, MEMSUFFIX), _T0_A0)(void)
+{
+ glue(stl, MEMSUFFIX)((uint8_t *)A0, T0);
+}
+
+#undef MEMSUFFIX
--- /dev/null
+/*
+ * i386 micro operations (included several times to generate
+ * different operand sizes)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#define DATA_BITS (1 << (3 + SHIFT))
+#define SHIFT_MASK (DATA_BITS - 1)
+#define SIGN_MASK (1 << (DATA_BITS - 1))
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_TYPE uint8_t
+#define DATA_STYPE int8_t
+#define DATA_MASK 0xff
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_TYPE uint16_t
+#define DATA_STYPE int16_t
+#define DATA_MASK 0xffff
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_TYPE uint32_t
+#define DATA_STYPE int32_t
+#define DATA_MASK 0xffffffff
+#else
+#error unhandled operand size
+#endif
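+
+/* This template is instantiated once per operand size; the includer is
+ expected to define SHIFT before each inclusion, along the lines of:
+
+ #define SHIFT 0
+ #include "ops_template.h" // byte ops: DATA_BITS == 8, SUFFIX b
+ #undef SHIFT
+ #define SHIFT 1
+ #include "ops_template.h" // word ops: DATA_BITS == 16, SUFFIX w
+ #undef SHIFT
+ #define SHIFT 2
+ #include "ops_template.h" // long ops: DATA_BITS == 32, SUFFIX l
+ #undef SHIFT
+*/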
+
+/* dynamic flags computation */
+
+static int glue(compute_all_add, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ int src1, src2;
+ src1 = CC_SRC;
+ src2 = CC_DST - CC_SRC;
+ cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = (CC_DST ^ src1 ^ src2) & 0x10;
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2 ^ -1) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_add, SUFFIX)(void)
+{
+ int src1, cf;
+ src1 = CC_SRC;
+ cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1;
+ return cf;
+}
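+
+/* Worked byte-size example of the lazy flag scheme: for an 8 bit ADD of
+ 0xff and 0x01, translation leaves one addend in CC_SRC and the raw sum
+ in CC_DST, and no flag is computed yet. If CF is later needed,
+ compute_c_addb returns ((uint8_t)CC_DST < (uint8_t)CC_SRC), e.g.
+ 0x00 < 0xff == 1 when CC_SRC holds 0xff; compute_all_addb likewise
+ recovers the second operand as CC_DST - CC_SRC and reports ZF == 0x40
+ since the truncated result is zero. */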
+
+static int glue(compute_all_adc, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ int src1, src2;
+ src1 = CC_SRC;
+ src2 = CC_DST - CC_SRC - 1;
+ cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = (CC_DST ^ src1 ^ src2) & 0x10;
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2 ^ -1) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_adc, SUFFIX)(void)
+{
+ int src1, cf;
+ src1 = CC_SRC;
+ cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1;
+ return cf;
+}
+
+static int glue(compute_all_sub, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+ cf = (DATA_TYPE)src1 < (DATA_TYPE)src2;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = (CC_DST ^ src1 ^ src2) & 0x10;
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sub, SUFFIX)(void)
+{
+ int src1, src2, cf;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+ cf = (DATA_TYPE)src1 < (DATA_TYPE)src2;
+ return cf;
+}
+
+static int glue(compute_all_sbb, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ int src1, src2;
+ src1 = CC_DST + CC_SRC + 1;
+ src2 = CC_SRC;
+ cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = (CC_DST ^ src1 ^ src2) & 0x10;
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2) & (src1 ^ CC_DST), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sbb, SUFFIX)(void)
+{
+ int src1, src2, cf;
+ src1 = CC_DST + CC_SRC + 1;
+ src2 = CC_SRC;
+ cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2;
+ return cf;
+}
+
+static int glue(compute_all_logic, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ cf = 0;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = 0;
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ of = 0;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_logic, SUFFIX)(void)
+{
+ return 0;
+}
+
+static int glue(compute_all_inc, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ int src1, src2;
+ src1 = CC_DST - 1;
+ src2 = 1;
+ cf = CC_SRC;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = (CC_DST ^ src1 ^ src2) & 0x10;
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ of = ((CC_DST & DATA_MASK) == SIGN_MASK) << 11;
+ return cf | pf | af | zf | sf | of;
+}
+
+#if DATA_BITS == 32
+static int glue(compute_c_inc, SUFFIX)(void)
+{
+ return CC_SRC;
+}
+#endif
+
+static int glue(compute_all_dec, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ int src1, src2;
+ src1 = CC_DST + 1;
+ src2 = 1;
+ cf = CC_SRC;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = (CC_DST ^ src1 ^ src2) & 0x10;
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ of = ((CC_DST & DATA_MASK) == ((uint32_t)SIGN_MASK - 1)) << 11;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_shl, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ cf = (CC_SRC >> (DATA_BITS - 1)) & CC_C;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = 0; /* undefined */
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ /* of is defined if shift count == 1 */
+ of = lshift(CC_SRC ^ CC_DST, 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_shl, SUFFIX)(void)
+{
+ return (CC_SRC >> (DATA_BITS - 1)) & CC_C;
+}
+
+#if DATA_BITS == 32
+static int glue(compute_c_sar, SUFFIX)(void)
+{
+ return CC_SRC & 1;
+}
+#endif
+
+static int glue(compute_all_sar, SUFFIX)(void)
+{
+ int cf, pf, af, zf, sf, of;
+ cf = CC_SRC & 1;
+ pf = parity_table[(uint8_t)CC_DST];
+ af = 0; /* undefined */
+ zf = ((DATA_TYPE)CC_DST == 0) << 6;
+ sf = lshift(CC_DST, 8 - DATA_BITS) & 0x80;
+ /* of is defined if shift count == 1 */
+ of = lshift(CC_SRC ^ CC_DST, 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+/* various optimized jump cases */
+
+void OPPROTO glue(op_jb_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ if ((DATA_TYPE)src1 < (DATA_TYPE)src2)
+ JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 0, PARAM2);
+ else
+ JUMP_TB(glue(op_jb_sub, SUFFIX), PARAM1, 1, PARAM3);
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_jz_sub, SUFFIX)(void)
+{
+ if ((DATA_TYPE)CC_DST == 0)
+ JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 0, PARAM2);
+ else
+ JUMP_TB(glue(op_jz_sub, SUFFIX), PARAM1, 1, PARAM3);
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_jbe_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ if ((DATA_TYPE)src1 <= (DATA_TYPE)src2)
+ JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 0, PARAM2);
+ else
+ JUMP_TB(glue(op_jbe_sub, SUFFIX), PARAM1, 1, PARAM3);
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_js_sub, SUFFIX)(void)
+{
+ if (CC_DST & SIGN_MASK)
+ JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 0, PARAM2);
+ else
+ JUMP_TB(glue(op_js_sub, SUFFIX), PARAM1, 1, PARAM3);
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_jl_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ if ((DATA_STYPE)src1 < (DATA_STYPE)src2)
+ JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 0, PARAM2);
+ else
+ JUMP_TB(glue(op_jl_sub, SUFFIX), PARAM1, 1, PARAM3);
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_jle_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ if ((DATA_STYPE)src1 <= (DATA_STYPE)src2)
+ JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 0, PARAM2);
+ else
+ JUMP_TB(glue(op_jle_sub, SUFFIX), PARAM1, 1, PARAM3);
+ FORCE_RET();
+}
+
+/* oldies (loop/jecxz) */
+
+#if DATA_BITS >= 16
+
+void OPPROTO glue(op_loopnz, SUFFIX)(void)
+{
+ unsigned int tmp;
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ tmp = (ECX - 1) & DATA_MASK;
+ ECX = (ECX & ~DATA_MASK) | tmp;
+ if (tmp != 0 && !(eflags & CC_Z))
+ EIP = PARAM1;
+ else
+ EIP = PARAM2;
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_loopz, SUFFIX)(void)
+{
+ unsigned int tmp;
+ int eflags;
+ eflags = cc_table[CC_OP].compute_all();
+ tmp = (ECX - 1) & DATA_MASK;
+ ECX = (ECX & ~DATA_MASK) | tmp;
+ if (tmp != 0 && (eflags & CC_Z))
+ EIP = PARAM1;
+ else
+ EIP = PARAM2;
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_loop, SUFFIX)(void)
+{
+ unsigned int tmp;
+ tmp = (ECX - 1) & DATA_MASK;
+ ECX = (ECX & ~DATA_MASK) | tmp;
+ if (tmp != 0)
+ EIP = PARAM1;
+ else
+ EIP = PARAM2;
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_jecxz, SUFFIX)(void)
+{
+ if ((DATA_TYPE)ECX == 0)
+ EIP = PARAM1;
+ else
+ EIP = PARAM2;
+ FORCE_RET();
+}
+
+#endif
+
+/* various optimized set cases */
+
+void OPPROTO glue(op_setb_T0_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ T0 = ((DATA_TYPE)src1 < (DATA_TYPE)src2);
+}
+
+void OPPROTO glue(op_setz_T0_sub, SUFFIX)(void)
+{
+ T0 = ((DATA_TYPE)CC_DST == 0);
+}
+
+void OPPROTO glue(op_setbe_T0_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ T0 = ((DATA_TYPE)src1 <= (DATA_TYPE)src2);
+}
+
+void OPPROTO glue(op_sets_T0_sub, SUFFIX)(void)
+{
+ T0 = lshift(CC_DST, -(DATA_BITS - 1)) & 1;
+}
+
+void OPPROTO glue(op_setl_T0_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ T0 = ((DATA_STYPE)src1 < (DATA_STYPE)src2);
+}
+
+void OPPROTO glue(op_setle_T0_sub, SUFFIX)(void)
+{
+ int src1, src2;
+ src1 = CC_DST + CC_SRC;
+ src2 = CC_SRC;
+
+ T0 = ((DATA_STYPE)src1 <= (DATA_STYPE)src2);
+}
+
+/* shifts */
+
+void OPPROTO glue(glue(op_shl, SUFFIX), _T0_T1)(void)
+{
+ int count;
+ count = T1 & 0x1f;
+ T0 = T0 << count;
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_shr, SUFFIX), _T0_T1)(void)
+{
+ int count;
+ count = T1 & 0x1f;
+ T0 &= DATA_MASK;
+ T0 = T0 >> count;
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_sar, SUFFIX), _T0_T1)(void)
+{
+ int count, src;
+ count = T1 & 0x1f;
+ src = (DATA_STYPE)T0;
+ T0 = src >> count;
+ FORCE_RET();
+}
+
+#undef MEM_WRITE
+#include "ops_template_mem.h"
+
+#define MEM_WRITE
+#include "ops_template_mem.h"
+
+/* bit operations */
+#if DATA_BITS >= 16
+
+void OPPROTO glue(glue(op_bt, SUFFIX), _T0_T1_cc)(void)
+{
+ int count;
+ count = T1 & SHIFT_MASK;
+ CC_SRC = T0 >> count;
+}
+
+void OPPROTO glue(glue(op_bts, SUFFIX), _T0_T1_cc)(void)
+{
+ int count;
+ count = T1 & SHIFT_MASK;
+ T1 = T0 >> count;
+ T0 |= (1 << count);
+}
+
+void OPPROTO glue(glue(op_btr, SUFFIX), _T0_T1_cc)(void)
+{
+ int count;
+ count = T1 & SHIFT_MASK;
+ T1 = T0 >> count;
+ T0 &= ~(1 << count);
+}
+
+void OPPROTO glue(glue(op_btc, SUFFIX), _T0_T1_cc)(void)
+{
+ int count;
+ count = T1 & SHIFT_MASK;
+ T1 = T0 >> count;
+ T0 ^= (1 << count);
+}
+
+void OPPROTO glue(glue(op_bsf, SUFFIX), _T0_cc)(void)
+{
+ int res, count;
+ res = T0 & DATA_MASK;
+ if (res != 0) {
+ count = 0;
+ while ((res & 1) == 0) {
+ count++;
+ res >>= 1;
+ }
+ T0 = count;
+ CC_DST = 1; /* ZF = 0 */
+ } else {
+ CC_DST = 0; /* ZF = 1 */
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_bsr, SUFFIX), _T0_cc)(void)
+{
+ int res, count;
+ res = T0 & DATA_MASK;
+ if (res != 0) {
+ count = DATA_BITS - 1;
+ while ((res & SIGN_MASK) == 0) {
+ count--;
+ res <<= 1;
+ }
+ T0 = count;
+ CC_DST = 1; /* ZF = 0 */
+ } else {
+ CC_DST = 0; /* ZF = 1 */
+ }
+ FORCE_RET();
+}
+
+#endif
+
+#if DATA_BITS == 32
+void OPPROTO op_update_bt_cc(void)
+{
+ CC_SRC = T1;
+}
+#endif
+
+/* string operations */
+
+void OPPROTO glue(op_movl_T0_Dshift, SUFFIX)(void)
+{
+ T0 = DF << SHIFT;
+}
+
+void OPPROTO glue(op_string_jz_sub, SUFFIX)(void)
+{
+ if ((DATA_TYPE)CC_DST == 0)
+ JUMP_TB2(glue(op_string_jz_sub, SUFFIX), PARAM1, 1);
+ FORCE_RET();
+}
+
+void OPPROTO glue(op_string_jnz_sub, SUFFIX)(void)
+{
+ if ((DATA_TYPE)CC_DST != 0)
+ JUMP_TB2(glue(op_string_jnz_sub, SUFFIX), PARAM1, 1);
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_string_jz_sub, SUFFIX), _im)(void)
+{
+ if ((DATA_TYPE)CC_DST == 0) {
+ EIP = PARAM1;
+ if (env->eflags & TF_MASK) {
+ raise_exception(EXCP01_SSTP);
+ }
+ T0 = 0;
+ EXIT_TB();
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_string_jnz_sub, SUFFIX), _im)(void)
+{
+ if ((DATA_TYPE)CC_DST != 0) {
+ EIP = PARAM1;
+ if (env->eflags & TF_MASK) {
+ raise_exception(EXCP01_SSTP);
+ }
+ T0 = 0;
+ EXIT_TB();
+ }
+ FORCE_RET();
+}
+
+#if DATA_BITS >= 16
+void OPPROTO glue(op_jz_ecx, SUFFIX)(void)
+{
+ if ((DATA_TYPE)ECX == 0)
+ JUMP_TB(glue(op_jz_ecx, SUFFIX), PARAM1, 1, PARAM2);
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_jz_ecx, SUFFIX), _im)(void)
+{
+ if ((DATA_TYPE)ECX == 0) {
+ EIP = PARAM1;
+ if (env->eflags & TF_MASK) {
+ raise_exception(EXCP01_SSTP);
+ }
+ T0 = 0;
+ EXIT_TB();
+ }
+ FORCE_RET();
+}
+#endif
+
+/* port I/O */
+
+void OPPROTO glue(glue(op_out, SUFFIX), _T0_T1)(void)
+{
+ glue(cpu_x86_out, SUFFIX)(env, T0 & 0xffff, T1 & DATA_MASK);
+}
+
+void OPPROTO glue(glue(op_in, SUFFIX), _T0_T1)(void)
+{
+ T1 = glue(cpu_x86_in, SUFFIX)(env, T0 & 0xffff);
+}
+
+void OPPROTO glue(glue(op_in, SUFFIX), _DX_T0)(void)
+{
+ T0 = glue(cpu_x86_in, SUFFIX)(env, EDX & 0xffff);
+}
+
+void OPPROTO glue(glue(op_out, SUFFIX), _DX_T0)(void)
+{
+ glue(cpu_x86_out, SUFFIX)(env, EDX & 0xffff, T0);
+}
+
+#undef DATA_BITS
+#undef SHIFT_MASK
+#undef SIGN_MASK
+#undef DATA_TYPE
+#undef DATA_STYPE
+#undef DATA_MASK
+#undef SUFFIX
--- /dev/null
+/*
+ * i386 micro operations (included several times to generate
+ * different operand sizes)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifdef MEM_WRITE
+
+#if DATA_BITS == 8
+#define MEM_SUFFIX b_mem
+#elif DATA_BITS == 16
+#define MEM_SUFFIX w_mem
+#elif DATA_BITS == 32
+#define MEM_SUFFIX l_mem
+#endif
+
+#else
+
+#define MEM_SUFFIX SUFFIX
+
+#endif
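+
+/* This header is included twice from ops_template.h: once without
+ MEM_WRITE, producing the register forms (e.g. op_rolb_T0_T1_cc, result
+ left in T0), and once with MEM_WRITE defined, producing the _mem forms
+ (e.g. op_rolb_mem_T0_T1_cc) that also store the result back through the
+ address in A0 for read-modify-write instructions. */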
+
+void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int count, src;
+ count = T1 & SHIFT_MASK;
+ if (count) {
+ src = T0;
+ T0 &= DATA_MASK;
+ T0 = (T0 << count) | (T0 >> (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#else
+ /* gcc 3.2 workaround. This is really a bug in gcc. */
+ asm volatile("" : : "r" (T0));
+#endif
+ CC_SRC = (cc_table[CC_OP].compute_all() & ~(CC_O | CC_C)) |
+ (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
+ (T0 & CC_C);
+ CC_OP = CC_OP_EFLAGS;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int count, src;
+ count = T1 & SHIFT_MASK;
+ if (count) {
+ src = T0;
+ T0 &= DATA_MASK;
+ T0 = (T0 >> count) | (T0 << (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#else
+ /* gcc 3.2 workaround. This is really a bug in gcc. */
+ asm volatile("" : : "r" (T0));
+#endif
+ CC_SRC = (cc_table[CC_OP].compute_all() & ~(CC_O | CC_C)) |
+ (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((T0 >> (DATA_BITS - 1)) & CC_C);
+ CC_OP = CC_OP_EFLAGS;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_rol, MEM_SUFFIX), _T0_T1)(void)
+{
+ int count;
+ count = T1 & SHIFT_MASK;
+ if (count) {
+ T0 &= DATA_MASK;
+ T0 = (T0 << count) | (T0 >> (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_ror, MEM_SUFFIX), _T0_T1)(void)
+{
+ int count;
+ count = T1 & SHIFT_MASK;
+ if (count) {
+ T0 &= DATA_MASK;
+ T0 = (T0 >> count) | (T0 << (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_rcl, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int count, res, eflags;
+ unsigned int src;
+
+ count = T1 & 0x1f;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
+ if (count) {
+ eflags = cc_table[CC_OP].compute_all();
+ T0 &= DATA_MASK;
+ src = T0;
+ res = (T0 << count) | ((eflags & CC_C) << (count - 1));
+ if (count > 1)
+ res |= T0 >> (DATA_BITS + 1 - count);
+ T0 = res;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (DATA_BITS - count)) & CC_C);
+ CC_OP = CC_OP_EFLAGS;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_rcr, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int count, res, eflags;
+ unsigned int src;
+
+ count = T1 & 0x1f;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
+ if (count) {
+ eflags = cc_table[CC_OP].compute_all();
+ T0 &= DATA_MASK;
+ src = T0;
+ res = (T0 >> count) | ((eflags & CC_C) << (DATA_BITS - count));
+ if (count > 1)
+ res |= T0 << (DATA_BITS + 1 - count);
+ T0 = res;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ T0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (count - 1)) & CC_C);
+ CC_OP = CC_OP_EFLAGS;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_shl, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int count, src;
+ count = T1 & 0x1f;
+ if (count) {
+ src = (DATA_TYPE)T0 << (count - 1);
+ T0 = T0 << count;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = src;
+ CC_DST = T0;
+ CC_OP = CC_OP_SHLB + SHIFT;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_shr, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int count, src;
+ count = T1 & 0x1f;
+ if (count) {
+ T0 &= DATA_MASK;
+ src = T0 >> (count - 1);
+ T0 = T0 >> count;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = src;
+ CC_DST = T0;
+ CC_OP = CC_OP_SARB + SHIFT;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_sar, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int count, src;
+ count = T1 & 0x1f;
+ if (count) {
+ src = (DATA_STYPE)T0;
+ T0 = src >> count;
+ src = src >> (count - 1);
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = src;
+ CC_DST = T0;
+ CC_OP = CC_OP_SARB + SHIFT;
+ }
+ FORCE_RET();
+}
+
+#if DATA_BITS == 16
+/* XXX: overflow flag might be incorrect in some cases in shldw */
+void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void)
+{
+ int count;
+ unsigned int res, tmp;
+ count = PARAM1;
+ T1 &= 0xffff;
+ res = T1 | (T0 << 16);
+ tmp = res >> (32 - count);
+ res <<= count;
+ if (count > 16)
+ res |= T1 << (count - 16);
+ T0 = res >> 16;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+}
+
+void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
+{
+ int count;
+ unsigned int res, tmp;
+ count = ECX & 0x1f;
+ if (count) {
+ T1 &= 0xffff;
+ res = T1 | (T0 << 16);
+ tmp = res >> (32 - count);
+ res <<= count;
+ if (count > 16)
+ res |= T1 << (count - 16);
+ T0 = res >> 16;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+ CC_OP = CC_OP_SARB + SHIFT;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void)
+{
+ int count;
+ unsigned int res, tmp;
+
+ count = PARAM1;
+ res = (T0 & 0xffff) | (T1 << 16);
+ tmp = res >> (count - 1);
+ res >>= count;
+ if (count > 16)
+ res |= T1 << (32 - count);
+ T0 = res;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+}
+
+void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
+{
+ int count;
+ unsigned int res, tmp;
+
+ count = ECX & 0x1f;
+ if (count) {
+ res = (T0 & 0xffff) | (T1 << 16);
+ tmp = res >> (count - 1);
+ res >>= count;
+ if (count > 16)
+ res |= T1 << (32 - count);
+ T0 = res;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+ CC_OP = CC_OP_SARB + SHIFT;
+ }
+ FORCE_RET();
+}
+#endif
+
+#if DATA_BITS == 32
+void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_im_cc)(void)
+{
+ int count, tmp;
+ count = PARAM1;
+ T0 &= DATA_MASK;
+ T1 &= DATA_MASK;
+ tmp = T0 << (count - 1);
+ T0 = (T0 << count) | (T1 >> (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+}
+
+void OPPROTO glue(glue(op_shld, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
+{
+ int count, tmp;
+ count = ECX & 0x1f;
+ if (count) {
+ T0 &= DATA_MASK;
+ T1 &= DATA_MASK;
+ tmp = T0 << (count - 1);
+ T0 = (T0 << count) | (T1 >> (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+ CC_OP = CC_OP_SHLB + SHIFT;
+ }
+ FORCE_RET();
+}
+
+void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_im_cc)(void)
+{
+ int count, tmp;
+ count = PARAM1;
+ T0 &= DATA_MASK;
+ T1 &= DATA_MASK;
+ tmp = T0 >> (count - 1);
+ T0 = (T0 >> count) | (T1 << (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+}
+
+void OPPROTO glue(glue(op_shrd, MEM_SUFFIX), _T0_T1_ECX_cc)(void)
+{
+ int count, tmp;
+ count = ECX & 0x1f;
+ if (count) {
+ T0 &= DATA_MASK;
+ T1 &= DATA_MASK;
+ tmp = T0 >> (count - 1);
+ T0 = (T0 >> count) | (T1 << (DATA_BITS - count));
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = tmp;
+ CC_DST = T0;
+ CC_OP = CC_OP_SARB + SHIFT;
+ }
+ FORCE_RET();
+}
+#endif
+
+/* carry add/sub (we only need to set CC_OP differently) */
+
+void OPPROTO glue(glue(op_adc, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int cf;
+ cf = cc_table[CC_OP].compute_c();
+ T0 = T0 + T1 + cf;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = T1;
+ CC_DST = T0;
+ CC_OP = CC_OP_ADDB + SHIFT + cf * 3;
+}
+
+void OPPROTO glue(glue(op_sbb, MEM_SUFFIX), _T0_T1_cc)(void)
+{
+ int cf;
+ cf = cc_table[CC_OP].compute_c();
+ T0 = T0 - T1 - cf;
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = T1;
+ CC_DST = T0;
+ CC_OP = CC_OP_SUBB + SHIFT + cf * 3;
+}
+
+void OPPROTO glue(glue(op_cmpxchg, MEM_SUFFIX), _T0_T1_EAX_cc)(void)
+{
+ unsigned int src, dst;
+
+ src = T0;
+ dst = EAX - T0;
+ if ((DATA_TYPE)dst == 0) {
+ T0 = T1;
+ } else {
+ EAX = (EAX & ~DATA_MASK) | (T0 & DATA_MASK);
+ }
+#ifdef MEM_WRITE
+ glue(st, SUFFIX)((uint8_t *)A0, T0);
+#endif
+ CC_SRC = src;
+ CC_DST = dst;
+ FORCE_RET();
+}
+
+#undef MEM_SUFFIX
+#undef MEM_WRITE
--- /dev/null
+/*
+ * i386 translation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <assert.h>
+#include <sys/mman.h>
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+
+/* XXX: move that elsewhere */
+static uint16_t *gen_opc_ptr;
+static uint32_t *gen_opparam_ptr;
+
+#define PREFIX_REPZ 0x01
+#define PREFIX_REPNZ 0x02
+#define PREFIX_LOCK 0x04
+#define PREFIX_DATA 0x08
+#define PREFIX_ADR 0x10
+
+typedef struct DisasContext {
+ /* current insn context */
+ int override; /* -1 if no override */
+ int prefix;
+ int aflag, dflag;
+ uint8_t *pc; /* pc = eip + cs_base */
+ int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
+ static state change (stop translation) */
+ /* current block context */
+ uint8_t *cs_base; /* base of CS segment */
+ int pe; /* protected mode */
+ int code32; /* 32 bit code segment */
+ int ss32; /* 32 bit stack segment */
+ int cc_op; /* current CC operation */
+ int addseg; /* non zero if any of DS/ES/SS has a non zero base */
+ int f_st; /* currently unused */
+ int vm86; /* vm86 mode */
+ int cpl;
+ int iopl;
+ int tf; /* TF cpu flag */
+ int jmp_opt; /* use direct block chaining for direct jumps */
+ int mem_index; /* select memory access functions */
+ struct TranslationBlock *tb;
+ int popl_esp_hack; /* for correct popl with esp base handling */
+} DisasContext;
+
+static void gen_eob(DisasContext *s);
+static void gen_jmp(DisasContext *s, unsigned int eip);
+
+/* i386 arith/logic operations */
+enum {
+ OP_ADDL,
+ OP_ORL,
+ OP_ADCL,
+ OP_SBBL,
+ OP_ANDL,
+ OP_SUBL,
+ OP_XORL,
+ OP_CMPL,
+};
+
+/* i386 shift ops */
+enum {
+ OP_ROL,
+ OP_ROR,
+ OP_RCL,
+ OP_RCR,
+ OP_SHL,
+ OP_SHR,
+ OP_SHL1, /* undocumented */
+ OP_SAR = 7,
+};
+
+enum {
+#define DEF(s, n, copy_size) INDEX_op_ ## s,
+#include "opc.h"
+#undef DEF
+ NB_OPS,
+};
+
+#include "gen-op.h"
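+
+/* opc.h lists every micro operation through the DEF macro, so the enum
+ above gives each one an INDEX_op_* number; gen-op.h (generated from the
+ same op list) provides the matching gen_op_*() emitters, which append
+ the opcode to gen_opc_ptr and any immediate parameters to
+ gen_opparam_ptr. */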
+
+/* operand size */
+enum {
+ OT_BYTE = 0,
+ OT_WORD,
+ OT_LONG,
+ OT_QUAD,
+};
+
+enum {
+ /* I386 int registers */
+ OR_EAX, /* MUST be even numbered */
+ OR_ECX,
+ OR_EDX,
+ OR_EBX,
+ OR_ESP,
+ OR_EBP,
+ OR_ESI,
+ OR_EDI,
+ OR_TMP0, /* temporary operand register */
+ OR_TMP1,
+ OR_A0, /* temporary register used when doing address evaluation */
+ OR_ZERO, /* fixed zero register */
+ NB_OREGS,
+};
+
+typedef void (GenOpFunc)(void);
+typedef void (GenOpFunc1)(long);
+typedef void (GenOpFunc2)(long, long);
+typedef void (GenOpFunc3)(long, long, long);
+
+static GenOpFunc *gen_op_mov_reg_T0[3][8] = {
+ [OT_BYTE] = {
+ gen_op_movb_EAX_T0,
+ gen_op_movb_ECX_T0,
+ gen_op_movb_EDX_T0,
+ gen_op_movb_EBX_T0,
+ gen_op_movh_EAX_T0,
+ gen_op_movh_ECX_T0,
+ gen_op_movh_EDX_T0,
+ gen_op_movh_EBX_T0,
+ },
+ [OT_WORD] = {
+ gen_op_movw_EAX_T0,
+ gen_op_movw_ECX_T0,
+ gen_op_movw_EDX_T0,
+ gen_op_movw_EBX_T0,
+ gen_op_movw_ESP_T0,
+ gen_op_movw_EBP_T0,
+ gen_op_movw_ESI_T0,
+ gen_op_movw_EDI_T0,
+ },
+ [OT_LONG] = {
+ gen_op_movl_EAX_T0,
+ gen_op_movl_ECX_T0,
+ gen_op_movl_EDX_T0,
+ gen_op_movl_EBX_T0,
+ gen_op_movl_ESP_T0,
+ gen_op_movl_EBP_T0,
+ gen_op_movl_ESI_T0,
+ gen_op_movl_EDI_T0,
+ },
+};
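+
+/* In the OT_BYTE row the index follows the x86 byte register encoding:
+ entries 0-3 are the low bytes AL, CL, DL, BL (movb) and entries 4-7 the
+ high bytes AH, CH, DH, BH (movh), which is why EAX..EBX each appear
+ twice. */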
+
+static GenOpFunc *gen_op_mov_reg_T1[3][8] = {
+ [OT_BYTE] = {
+ gen_op_movb_EAX_T1,
+ gen_op_movb_ECX_T1,
+ gen_op_movb_EDX_T1,
+ gen_op_movb_EBX_T1,
+ gen_op_movh_EAX_T1,
+ gen_op_movh_ECX_T1,
+ gen_op_movh_EDX_T1,
+ gen_op_movh_EBX_T1,
+ },
+ [OT_WORD] = {
+ gen_op_movw_EAX_T1,
+ gen_op_movw_ECX_T1,
+ gen_op_movw_EDX_T1,
+ gen_op_movw_EBX_T1,
+ gen_op_movw_ESP_T1,
+ gen_op_movw_EBP_T1,
+ gen_op_movw_ESI_T1,
+ gen_op_movw_EDI_T1,
+ },
+ [OT_LONG] = {
+ gen_op_movl_EAX_T1,
+ gen_op_movl_ECX_T1,
+ gen_op_movl_EDX_T1,
+ gen_op_movl_EBX_T1,
+ gen_op_movl_ESP_T1,
+ gen_op_movl_EBP_T1,
+ gen_op_movl_ESI_T1,
+ gen_op_movl_EDI_T1,
+ },
+};
+
+static GenOpFunc *gen_op_mov_reg_A0[2][8] = {
+ [0] = {
+ gen_op_movw_EAX_A0,
+ gen_op_movw_ECX_A0,
+ gen_op_movw_EDX_A0,
+ gen_op_movw_EBX_A0,
+ gen_op_movw_ESP_A0,
+ gen_op_movw_EBP_A0,
+ gen_op_movw_ESI_A0,
+ gen_op_movw_EDI_A0,
+ },
+ [1] = {
+ gen_op_movl_EAX_A0,
+ gen_op_movl_ECX_A0,
+ gen_op_movl_EDX_A0,
+ gen_op_movl_EBX_A0,
+ gen_op_movl_ESP_A0,
+ gen_op_movl_EBP_A0,
+ gen_op_movl_ESI_A0,
+ gen_op_movl_EDI_A0,
+ },
+};
+
+static GenOpFunc *gen_op_mov_TN_reg[3][2][8] =
+{
+ [OT_BYTE] = {
+ {
+ gen_op_movl_T0_EAX,
+ gen_op_movl_T0_ECX,
+ gen_op_movl_T0_EDX,
+ gen_op_movl_T0_EBX,
+ gen_op_movh_T0_EAX,
+ gen_op_movh_T0_ECX,
+ gen_op_movh_T0_EDX,
+ gen_op_movh_T0_EBX,
+ },
+ {
+ gen_op_movl_T1_EAX,
+ gen_op_movl_T1_ECX,
+ gen_op_movl_T1_EDX,
+ gen_op_movl_T1_EBX,
+ gen_op_movh_T1_EAX,
+ gen_op_movh_T1_ECX,
+ gen_op_movh_T1_EDX,
+ gen_op_movh_T1_EBX,
+ },
+ },
+ [OT_WORD] = {
+ {
+ gen_op_movl_T0_EAX,
+ gen_op_movl_T0_ECX,
+ gen_op_movl_T0_EDX,
+ gen_op_movl_T0_EBX,
+ gen_op_movl_T0_ESP,
+ gen_op_movl_T0_EBP,
+ gen_op_movl_T0_ESI,
+ gen_op_movl_T0_EDI,
+ },
+ {
+ gen_op_movl_T1_EAX,
+ gen_op_movl_T1_ECX,
+ gen_op_movl_T1_EDX,
+ gen_op_movl_T1_EBX,
+ gen_op_movl_T1_ESP,
+ gen_op_movl_T1_EBP,
+ gen_op_movl_T1_ESI,
+ gen_op_movl_T1_EDI,
+ },
+ },
+ [OT_LONG] = {
+ {
+ gen_op_movl_T0_EAX,
+ gen_op_movl_T0_ECX,
+ gen_op_movl_T0_EDX,
+ gen_op_movl_T0_EBX,
+ gen_op_movl_T0_ESP,
+ gen_op_movl_T0_EBP,
+ gen_op_movl_T0_ESI,
+ gen_op_movl_T0_EDI,
+ },
+ {
+ gen_op_movl_T1_EAX,
+ gen_op_movl_T1_ECX,
+ gen_op_movl_T1_EDX,
+ gen_op_movl_T1_EBX,
+ gen_op_movl_T1_ESP,
+ gen_op_movl_T1_EBP,
+ gen_op_movl_T1_ESI,
+ gen_op_movl_T1_EDI,
+ },
+ },
+};
+
+static GenOpFunc *gen_op_movl_A0_reg[8] = {
+ gen_op_movl_A0_EAX,
+ gen_op_movl_A0_ECX,
+ gen_op_movl_A0_EDX,
+ gen_op_movl_A0_EBX,
+ gen_op_movl_A0_ESP,
+ gen_op_movl_A0_EBP,
+ gen_op_movl_A0_ESI,
+ gen_op_movl_A0_EDI,
+};
+
+static GenOpFunc *gen_op_addl_A0_reg_sN[4][8] = {
+ [0] = {
+ gen_op_addl_A0_EAX,
+ gen_op_addl_A0_ECX,
+ gen_op_addl_A0_EDX,
+ gen_op_addl_A0_EBX,
+ gen_op_addl_A0_ESP,
+ gen_op_addl_A0_EBP,
+ gen_op_addl_A0_ESI,
+ gen_op_addl_A0_EDI,
+ },
+ [1] = {
+ gen_op_addl_A0_EAX_s1,
+ gen_op_addl_A0_ECX_s1,
+ gen_op_addl_A0_EDX_s1,
+ gen_op_addl_A0_EBX_s1,
+ gen_op_addl_A0_ESP_s1,
+ gen_op_addl_A0_EBP_s1,
+ gen_op_addl_A0_ESI_s1,
+ gen_op_addl_A0_EDI_s1,
+ },
+ [2] = {
+ gen_op_addl_A0_EAX_s2,
+ gen_op_addl_A0_ECX_s2,
+ gen_op_addl_A0_EDX_s2,
+ gen_op_addl_A0_EBX_s2,
+ gen_op_addl_A0_ESP_s2,
+ gen_op_addl_A0_EBP_s2,
+ gen_op_addl_A0_ESI_s2,
+ gen_op_addl_A0_EDI_s2,
+ },
+ [3] = {
+ gen_op_addl_A0_EAX_s3,
+ gen_op_addl_A0_ECX_s3,
+ gen_op_addl_A0_EDX_s3,
+ gen_op_addl_A0_EBX_s3,
+ gen_op_addl_A0_ESP_s3,
+ gen_op_addl_A0_EBP_s3,
+ gen_op_addl_A0_ESI_s3,
+ gen_op_addl_A0_EDI_s3,
+ },
+};
+
+static GenOpFunc *gen_op_cmov_reg_T1_T0[2][8] = {
+ [0] = {
+ gen_op_cmovw_EAX_T1_T0,
+ gen_op_cmovw_ECX_T1_T0,
+ gen_op_cmovw_EDX_T1_T0,
+ gen_op_cmovw_EBX_T1_T0,
+ gen_op_cmovw_ESP_T1_T0,
+ gen_op_cmovw_EBP_T1_T0,
+ gen_op_cmovw_ESI_T1_T0,
+ gen_op_cmovw_EDI_T1_T0,
+ },
+ [1] = {
+ gen_op_cmovl_EAX_T1_T0,
+ gen_op_cmovl_ECX_T1_T0,
+ gen_op_cmovl_EDX_T1_T0,
+ gen_op_cmovl_EBX_T1_T0,
+ gen_op_cmovl_ESP_T1_T0,
+ gen_op_cmovl_EBP_T1_T0,
+ gen_op_cmovl_ESI_T1_T0,
+ gen_op_cmovl_EDI_T1_T0,
+ },
+};
+
+static GenOpFunc *gen_op_arith_T0_T1_cc[8] = {
+ NULL,
+ gen_op_orl_T0_T1,
+ NULL,
+ NULL,
+ gen_op_andl_T0_T1,
+ NULL,
+ gen_op_xorl_T0_T1,
+ NULL,
+};
+
+static GenOpFunc *gen_op_arithc_T0_T1_cc[3][2] = {
+ [OT_BYTE] = {
+ gen_op_adcb_T0_T1_cc,
+ gen_op_sbbb_T0_T1_cc,
+ },
+ [OT_WORD] = {
+ gen_op_adcw_T0_T1_cc,
+ gen_op_sbbw_T0_T1_cc,
+ },
+ [OT_LONG] = {
+ gen_op_adcl_T0_T1_cc,
+ gen_op_sbbl_T0_T1_cc,
+ },
+};
+
+static GenOpFunc *gen_op_arithc_mem_T0_T1_cc[3][2] = {
+ [OT_BYTE] = {
+ gen_op_adcb_mem_T0_T1_cc,
+ gen_op_sbbb_mem_T0_T1_cc,
+ },
+ [OT_WORD] = {
+ gen_op_adcw_mem_T0_T1_cc,
+ gen_op_sbbw_mem_T0_T1_cc,
+ },
+ [OT_LONG] = {
+ gen_op_adcl_mem_T0_T1_cc,
+ gen_op_sbbl_mem_T0_T1_cc,
+ },
+};
+
+static const int cc_op_arithb[8] = {
+ CC_OP_ADDB,
+ CC_OP_LOGICB,
+ CC_OP_ADDB,
+ CC_OP_SUBB,
+ CC_OP_LOGICB,
+ CC_OP_SUBB,
+ CC_OP_LOGICB,
+ CC_OP_SUBB,
+};
+
+static GenOpFunc *gen_op_cmpxchg_T0_T1_EAX_cc[3] = {
+ gen_op_cmpxchgb_T0_T1_EAX_cc,
+ gen_op_cmpxchgw_T0_T1_EAX_cc,
+ gen_op_cmpxchgl_T0_T1_EAX_cc,
+};
+
+static GenOpFunc *gen_op_cmpxchg_mem_T0_T1_EAX_cc[3] = {
+ gen_op_cmpxchgb_mem_T0_T1_EAX_cc,
+ gen_op_cmpxchgw_mem_T0_T1_EAX_cc,
+ gen_op_cmpxchgl_mem_T0_T1_EAX_cc,
+};
+
+static GenOpFunc *gen_op_shift_T0_T1_cc[3][8] = {
+ [OT_BYTE] = {
+ gen_op_rolb_T0_T1_cc,
+ gen_op_rorb_T0_T1_cc,
+ gen_op_rclb_T0_T1_cc,
+ gen_op_rcrb_T0_T1_cc,
+ gen_op_shlb_T0_T1_cc,
+ gen_op_shrb_T0_T1_cc,
+ gen_op_shlb_T0_T1_cc,
+ gen_op_sarb_T0_T1_cc,
+ },
+ [OT_WORD] = {
+ gen_op_rolw_T0_T1_cc,
+ gen_op_rorw_T0_T1_cc,
+ gen_op_rclw_T0_T1_cc,
+ gen_op_rcrw_T0_T1_cc,
+ gen_op_shlw_T0_T1_cc,
+ gen_op_shrw_T0_T1_cc,
+ gen_op_shlw_T0_T1_cc,
+ gen_op_sarw_T0_T1_cc,
+ },
+ [OT_LONG] = {
+ gen_op_roll_T0_T1_cc,
+ gen_op_rorl_T0_T1_cc,
+ gen_op_rcll_T0_T1_cc,
+ gen_op_rcrl_T0_T1_cc,
+ gen_op_shll_T0_T1_cc,
+ gen_op_shrl_T0_T1_cc,
+ gen_op_shll_T0_T1_cc,
+ gen_op_sarl_T0_T1_cc,
+ },
+};
+
+static GenOpFunc *gen_op_shift_mem_T0_T1_cc[3][8] = {
+ [OT_BYTE] = {
+ gen_op_rolb_mem_T0_T1_cc,
+ gen_op_rorb_mem_T0_T1_cc,
+ gen_op_rclb_mem_T0_T1_cc,
+ gen_op_rcrb_mem_T0_T1_cc,
+ gen_op_shlb_mem_T0_T1_cc,
+ gen_op_shrb_mem_T0_T1_cc,
+ gen_op_shlb_mem_T0_T1_cc,
+ gen_op_sarb_mem_T0_T1_cc,
+ },
+ [OT_WORD] = {
+ gen_op_rolw_mem_T0_T1_cc,
+ gen_op_rorw_mem_T0_T1_cc,
+ gen_op_rclw_mem_T0_T1_cc,
+ gen_op_rcrw_mem_T0_T1_cc,
+ gen_op_shlw_mem_T0_T1_cc,
+ gen_op_shrw_mem_T0_T1_cc,
+ gen_op_shlw_mem_T0_T1_cc,
+ gen_op_sarw_mem_T0_T1_cc,
+ },
+ [OT_LONG] = {
+ gen_op_roll_mem_T0_T1_cc,
+ gen_op_rorl_mem_T0_T1_cc,
+ gen_op_rcll_mem_T0_T1_cc,
+ gen_op_rcrl_mem_T0_T1_cc,
+ gen_op_shll_mem_T0_T1_cc,
+ gen_op_shrl_mem_T0_T1_cc,
+ gen_op_shll_mem_T0_T1_cc,
+ gen_op_sarl_mem_T0_T1_cc,
+ },
+};
+
+static GenOpFunc1 *gen_op_shiftd_T0_T1_im_cc[2][2] = {
+ [0] = {
+ gen_op_shldw_T0_T1_im_cc,
+ gen_op_shrdw_T0_T1_im_cc,
+ },
+ [1] = {
+ gen_op_shldl_T0_T1_im_cc,
+ gen_op_shrdl_T0_T1_im_cc,
+ },
+};
+
+static GenOpFunc *gen_op_shiftd_T0_T1_ECX_cc[2][2] = {
+ [0] = {
+ gen_op_shldw_T0_T1_ECX_cc,
+ gen_op_shrdw_T0_T1_ECX_cc,
+ },
+ [1] = {
+ gen_op_shldl_T0_T1_ECX_cc,
+ gen_op_shrdl_T0_T1_ECX_cc,
+ },
+};
+
+static GenOpFunc1 *gen_op_shiftd_mem_T0_T1_im_cc[2][2] = {
+ [0] = {
+ gen_op_shldw_mem_T0_T1_im_cc,
+ gen_op_shrdw_mem_T0_T1_im_cc,
+ },
+ [1] = {
+ gen_op_shldl_mem_T0_T1_im_cc,
+ gen_op_shrdl_mem_T0_T1_im_cc,
+ },
+};
+
+static GenOpFunc *gen_op_shiftd_mem_T0_T1_ECX_cc[2][2] = {
+ [0] = {
+ gen_op_shldw_mem_T0_T1_ECX_cc,
+ gen_op_shrdw_mem_T0_T1_ECX_cc,
+ },
+ [1] = {
+ gen_op_shldl_mem_T0_T1_ECX_cc,
+ gen_op_shrdl_mem_T0_T1_ECX_cc,
+ },
+};
+
+static GenOpFunc *gen_op_btx_T0_T1_cc[2][4] = {
+ [0] = {
+ gen_op_btw_T0_T1_cc,
+ gen_op_btsw_T0_T1_cc,
+ gen_op_btrw_T0_T1_cc,
+ gen_op_btcw_T0_T1_cc,
+ },
+ [1] = {
+ gen_op_btl_T0_T1_cc,
+ gen_op_btsl_T0_T1_cc,
+ gen_op_btrl_T0_T1_cc,
+ gen_op_btcl_T0_T1_cc,
+ },
+};
+
+static GenOpFunc *gen_op_bsx_T0_cc[2][2] = {
+ [0] = {
+ gen_op_bsfw_T0_cc,
+ gen_op_bsrw_T0_cc,
+ },
+ [1] = {
+ gen_op_bsfl_T0_cc,
+ gen_op_bsrl_T0_cc,
+ },
+};
+
+static GenOpFunc *gen_op_lds_T0_A0[3 * 3] = {
+ gen_op_ldsb_T0_A0,
+ gen_op_ldsw_T0_A0,
+ NULL,
+
+ gen_op_ldsb_kernel_T0_A0,
+ gen_op_ldsw_kernel_T0_A0,
+ NULL,
+
+ gen_op_ldsb_user_T0_A0,
+ gen_op_ldsw_user_T0_A0,
+ NULL,
+};
+
+static GenOpFunc *gen_op_ldu_T0_A0[3 * 3] = {
+ gen_op_ldub_T0_A0,
+ gen_op_lduw_T0_A0,
+ NULL,
+
+ gen_op_ldub_kernel_T0_A0,
+ gen_op_lduw_kernel_T0_A0,
+ NULL,
+
+ gen_op_ldub_user_T0_A0,
+ gen_op_lduw_user_T0_A0,
+ NULL,
+};
+
+/* sign does not matter, except for lidt/lgdt call (TODO: fix it) */
+static GenOpFunc *gen_op_ld_T0_A0[3 * 3] = {
+ gen_op_ldub_T0_A0,
+ gen_op_lduw_T0_A0,
+ gen_op_ldl_T0_A0,
+
+ gen_op_ldub_kernel_T0_A0,
+ gen_op_lduw_kernel_T0_A0,
+ gen_op_ldl_kernel_T0_A0,
+
+ gen_op_ldub_user_T0_A0,
+ gen_op_lduw_user_T0_A0,
+ gen_op_ldl_user_T0_A0,
+};
+
+static GenOpFunc *gen_op_ld_T1_A0[3 * 3] = {
+ gen_op_ldub_T1_A0,
+ gen_op_lduw_T1_A0,
+ gen_op_ldl_T1_A0,
+
+ gen_op_ldub_kernel_T1_A0,
+ gen_op_lduw_kernel_T1_A0,
+ gen_op_ldl_kernel_T1_A0,
+
+ gen_op_ldub_user_T1_A0,
+ gen_op_lduw_user_T1_A0,
+ gen_op_ldl_user_T1_A0,
+};
+
+static GenOpFunc *gen_op_st_T0_A0[3 * 3] = {
+ gen_op_stb_T0_A0,
+ gen_op_stw_T0_A0,
+ gen_op_stl_T0_A0,
+
+ gen_op_stb_kernel_T0_A0,
+ gen_op_stw_kernel_T0_A0,
+ gen_op_stl_kernel_T0_A0,
+
+ gen_op_stb_user_T0_A0,
+ gen_op_stw_user_T0_A0,
+ gen_op_stl_user_T0_A0,
+};
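+
+/* The load/store tables above hold three groups of three entries (raw,
+ kernel and user access), so s->mem_index takes the values 0, 3 or 6 and
+ the final op is selected as table[ot + s->mem_index]; e.g.
+ gen_op_ld_T0_A0[OT_WORD + 6] is gen_op_lduw_user_T0_A0. */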
+
+static inline void gen_string_movl_A0_ESI(DisasContext *s)
+{
+ int override;
+
+ override = s->override;
+ if (s->aflag) {
+ /* 32 bit address */
+ if (s->addseg && override < 0)
+ override = R_DS;
+ if (override >= 0) {
+ gen_op_movl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ gen_op_addl_A0_reg_sN[0][R_ESI]();
+ } else {
+ gen_op_movl_A0_reg[R_ESI]();
+ }
+ } else {
+ /* 16 bit address: a segment base is always added */
+ if (override < 0)
+ override = R_DS;
+ gen_op_movl_A0_reg[R_ESI]();
+ gen_op_andl_A0_ffff();
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
+}
+
+static inline void gen_string_movl_A0_EDI(DisasContext *s)
+{
+ if (s->aflag) {
+ if (s->addseg) {
+ gen_op_movl_A0_seg(offsetof(CPUX86State,segs[R_ES].base));
+ gen_op_addl_A0_reg_sN[0][R_EDI]();
+ } else {
+ gen_op_movl_A0_reg[R_EDI]();
+ }
+ } else {
+ gen_op_movl_A0_reg[R_EDI]();
+ gen_op_andl_A0_ffff();
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_ES].base));
+ }
+}
+
+static GenOpFunc *gen_op_movl_T0_Dshift[3] = {
+ gen_op_movl_T0_Dshiftb,
+ gen_op_movl_T0_Dshiftw,
+ gen_op_movl_T0_Dshiftl,
+};
+
+static GenOpFunc2 *gen_op_jz_ecx[2] = {
+ gen_op_jz_ecxw,
+ gen_op_jz_ecxl,
+};
+
+static GenOpFunc1 *gen_op_jz_ecx_im[2] = {
+ gen_op_jz_ecxw_im,
+ gen_op_jz_ecxl_im,
+};
+
+static GenOpFunc *gen_op_dec_ECX[2] = {
+ gen_op_decw_ECX,
+ gen_op_decl_ECX,
+};
+
+static GenOpFunc1 *gen_op_string_jnz_sub[2][3] = {
+ {
+ gen_op_string_jnz_subb,
+ gen_op_string_jnz_subw,
+ gen_op_string_jnz_subl,
+ },
+ {
+ gen_op_string_jz_subb,
+ gen_op_string_jz_subw,
+ gen_op_string_jz_subl,
+ },
+};
+
+static GenOpFunc1 *gen_op_string_jnz_sub_im[2][3] = {
+ {
+ gen_op_string_jnz_subb_im,
+ gen_op_string_jnz_subw_im,
+ gen_op_string_jnz_subl_im,
+ },
+ {
+ gen_op_string_jz_subb_im,
+ gen_op_string_jz_subw_im,
+ gen_op_string_jz_subl_im,
+ },
+};
+
+static GenOpFunc *gen_op_in_DX_T0[3] = {
+ gen_op_inb_DX_T0,
+ gen_op_inw_DX_T0,
+ gen_op_inl_DX_T0,
+};
+
+static GenOpFunc *gen_op_out_DX_T0[3] = {
+ gen_op_outb_DX_T0,
+ gen_op_outw_DX_T0,
+ gen_op_outl_DX_T0,
+};
+
+static inline void gen_movs(DisasContext *s, int ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ gen_string_movl_A0_EDI(s);
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ gen_op_movl_T0_Dshift[ot]();
+ if (s->aflag) {
+ gen_op_addl_ESI_T0();
+ gen_op_addl_EDI_T0();
+ } else {
+ gen_op_addw_ESI_T0();
+ gen_op_addw_EDI_T0();
+ }
+}
+
+static inline void gen_update_cc_op(DisasContext *s)
+{
+ if (s->cc_op != CC_OP_DYNAMIC) {
+ gen_op_set_cc_op(s->cc_op);
+ s->cc_op = CC_OP_DYNAMIC;
+ }
+}
+
+static inline void gen_jz_ecx_string(DisasContext *s, unsigned int next_eip)
+{
+ if (s->jmp_opt) {
+ gen_op_jz_ecx[s->aflag]((long)s->tb, next_eip);
+ } else {
+ /* XXX: does not work with gdbstub "ice" single step - not a
+ serious problem */
+ gen_op_jz_ecx_im[s->aflag](next_eip);
+ }
+}
+
+static inline void gen_stos(DisasContext *s, int ot)
+{
+ gen_op_mov_TN_reg[OT_LONG][0][R_EAX]();
+ gen_string_movl_A0_EDI(s);
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ gen_op_movl_T0_Dshift[ot]();
+ if (s->aflag) {
+ gen_op_addl_EDI_T0();
+ } else {
+ gen_op_addw_EDI_T0();
+ }
+}
+
+static inline void gen_lods(DisasContext *s, int ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ gen_op_mov_reg_T0[ot][R_EAX]();
+ gen_op_movl_T0_Dshift[ot]();
+ if (s->aflag) {
+ gen_op_addl_ESI_T0();
+ } else {
+ gen_op_addw_ESI_T0();
+ }
+}
+
+static inline void gen_scas(DisasContext *s, int ot)
+{
+ gen_op_mov_TN_reg[OT_LONG][0][R_EAX]();
+ gen_string_movl_A0_EDI(s);
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ gen_op_cmpl_T0_T1_cc();
+ gen_op_movl_T0_Dshift[ot]();
+ if (s->aflag) {
+ gen_op_addl_EDI_T0();
+ } else {
+ gen_op_addw_EDI_T0();
+ }
+}
+
+static inline void gen_cmps(DisasContext *s, int ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ gen_string_movl_A0_EDI(s);
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ gen_op_cmpl_T0_T1_cc();
+ gen_op_movl_T0_Dshift[ot]();
+ if (s->aflag) {
+ gen_op_addl_ESI_T0();
+ gen_op_addl_EDI_T0();
+ } else {
+ gen_op_addw_ESI_T0();
+ gen_op_addw_EDI_T0();
+ }
+}
+
+static inline void gen_ins(DisasContext *s, int ot)
+{
+ gen_op_in_DX_T0[ot]();
+ gen_string_movl_A0_EDI(s);
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ gen_op_movl_T0_Dshift[ot]();
+ if (s->aflag) {
+ gen_op_addl_EDI_T0();
+ } else {
+ gen_op_addw_EDI_T0();
+ }
+}
+
+static inline void gen_outs(DisasContext *s, int ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ gen_op_out_DX_T0[ot]();
+ gen_op_movl_T0_Dshift[ot]();
+ if (s->aflag) {
+ gen_op_addl_ESI_T0();
+ } else {
+ gen_op_addw_ESI_T0();
+ }
+}
+
+/* same method as Valgrind: we generate jumps to the current or next
+ instruction */
+#define GEN_REPZ(op) \
+static inline void gen_repz_ ## op(DisasContext *s, int ot, \
+ unsigned int cur_eip, unsigned int next_eip) \
+{ \
+ gen_update_cc_op(s); \
+ gen_jz_ecx_string(s, next_eip); \
+ gen_ ## op(s, ot); \
+ gen_op_dec_ECX[s->aflag](); \
+ /* a loop would cause two single step exceptions if ECX = 1 \
+ before rep string_insn */ \
+ if (!s->jmp_opt) \
+ gen_op_jz_ecx_im[s->aflag](next_eip); \
+ gen_jmp(s, cur_eip); \
+}
+
+#define GEN_REPZ2(op) \
+static inline void gen_repz_ ## op(DisasContext *s, int ot, \
+ unsigned int cur_eip, \
+ unsigned int next_eip, \
+ int nz) \
+{ \
+ gen_update_cc_op(s); \
+ gen_jz_ecx_string(s, next_eip); \
+ gen_ ## op(s, ot); \
+ gen_op_dec_ECX[s->aflag](); \
+ gen_op_set_cc_op(CC_OP_SUBB + ot); \
+ if (!s->jmp_opt) \
+ gen_op_string_jnz_sub_im[nz][ot](next_eip); \
+ else \
+ gen_op_string_jnz_sub[nz][ot]((long)s->tb); \
+ if (!s->jmp_opt) \
+ gen_op_jz_ecx_im[s->aflag](next_eip); \
+ gen_jmp(s, cur_eip); \
+}
+
+GEN_REPZ(movs)
+GEN_REPZ(stos)
+GEN_REPZ(lods)
+GEN_REPZ(ins)
+GEN_REPZ(outs)
+GEN_REPZ2(scas)
+GEN_REPZ2(cmps)
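+
+/* For instance, "rep movsl" with 32 bit addressing expands roughly to:
+
+ if (ECX == 0) goto next_insn; // gen_jz_ecx_string
+ gen_movs(s, OT_LONG); // copy one element, step ESI/EDI
+ ECX--; // gen_op_dec_ECX[s->aflag]
+ goto cur_insn; // gen_jmp re-enters the same insn
+
+ so each pass through the translated code performs a single iteration
+ and loops by jumping back to the rep instruction itself. */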
+
+static GenOpFunc *gen_op_in[3] = {
+ gen_op_inb_T0_T1,
+ gen_op_inw_T0_T1,
+ gen_op_inl_T0_T1,
+};
+
+static GenOpFunc *gen_op_out[3] = {
+ gen_op_outb_T0_T1,
+ gen_op_outw_T0_T1,
+ gen_op_outl_T0_T1,
+};
+
+enum {
+ JCC_O,
+ JCC_B,
+ JCC_Z,
+ JCC_BE,
+ JCC_S,
+ JCC_P,
+ JCC_L,
+ JCC_LE,
+};
+
+static GenOpFunc3 *gen_jcc_sub[3][8] = {
+ [OT_BYTE] = {
+ NULL,
+ gen_op_jb_subb,
+ gen_op_jz_subb,
+ gen_op_jbe_subb,
+ gen_op_js_subb,
+ NULL,
+ gen_op_jl_subb,
+ gen_op_jle_subb,
+ },
+ [OT_WORD] = {
+ NULL,
+ gen_op_jb_subw,
+ gen_op_jz_subw,
+ gen_op_jbe_subw,
+ gen_op_js_subw,
+ NULL,
+ gen_op_jl_subw,
+ gen_op_jle_subw,
+ },
+ [OT_LONG] = {
+ NULL,
+ gen_op_jb_subl,
+ gen_op_jz_subl,
+ gen_op_jbe_subl,
+ gen_op_js_subl,
+ NULL,
+ gen_op_jl_subl,
+ gen_op_jle_subl,
+ },
+};
+static GenOpFunc2 *gen_op_loop[2][4] = {
+ [0] = {
+ gen_op_loopnzw,
+ gen_op_loopzw,
+ gen_op_loopw,
+ gen_op_jecxzw,
+ },
+ [1] = {
+ gen_op_loopnzl,
+ gen_op_loopzl,
+ gen_op_loopl,
+ gen_op_jecxzl,
+ },
+};
+
+static GenOpFunc *gen_setcc_slow[8] = {
+ gen_op_seto_T0_cc,
+ gen_op_setb_T0_cc,
+ gen_op_setz_T0_cc,
+ gen_op_setbe_T0_cc,
+ gen_op_sets_T0_cc,
+ gen_op_setp_T0_cc,
+ gen_op_setl_T0_cc,
+ gen_op_setle_T0_cc,
+};
+
+static GenOpFunc *gen_setcc_sub[3][8] = {
+ [OT_BYTE] = {
+ NULL,
+ gen_op_setb_T0_subb,
+ gen_op_setz_T0_subb,
+ gen_op_setbe_T0_subb,
+ gen_op_sets_T0_subb,
+ NULL,
+ gen_op_setl_T0_subb,
+ gen_op_setle_T0_subb,
+ },
+ [OT_WORD] = {
+ NULL,
+ gen_op_setb_T0_subw,
+ gen_op_setz_T0_subw,
+ gen_op_setbe_T0_subw,
+ gen_op_sets_T0_subw,
+ NULL,
+ gen_op_setl_T0_subw,
+ gen_op_setle_T0_subw,
+ },
+ [OT_LONG] = {
+ NULL,
+ gen_op_setb_T0_subl,
+ gen_op_setz_T0_subl,
+ gen_op_setbe_T0_subl,
+ gen_op_sets_T0_subl,
+ NULL,
+ gen_op_setl_T0_subl,
+ gen_op_setle_T0_subl,
+ },
+};
+
+static GenOpFunc *gen_op_fp_arith_ST0_FT0[8] = {
+ gen_op_fadd_ST0_FT0,
+ gen_op_fmul_ST0_FT0,
+ gen_op_fcom_ST0_FT0,
+ gen_op_fcom_ST0_FT0,
+ gen_op_fsub_ST0_FT0,
+ gen_op_fsubr_ST0_FT0,
+ gen_op_fdiv_ST0_FT0,
+ gen_op_fdivr_ST0_FT0,
+};
+
+/* NOTE the exception in "r" op ordering: fsub/fsubr and fdiv/fdivr are
+ swapped relative to the ST0 table above */
+static GenOpFunc1 *gen_op_fp_arith_STN_ST0[8] = {
+ gen_op_fadd_STN_ST0,
+ gen_op_fmul_STN_ST0,
+ NULL,
+ NULL,
+ gen_op_fsubr_STN_ST0,
+ gen_op_fsub_STN_ST0,
+ gen_op_fdivr_STN_ST0,
+ gen_op_fdiv_STN_ST0,
+};
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_op(DisasContext *s1, int op, int ot, int d)
+{
+ GenOpFunc *gen_update_cc;
+
+ if (d != OR_TMP0) {
+ gen_op_mov_TN_reg[ot][0][d]();
+ } else {
+ gen_op_ld_T0_A0[ot + s1->mem_index]();
+ }
+ switch(op) {
+ case OP_ADCL:
+ case OP_SBBL:
+ if (s1->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s1->cc_op);
+ if (d != OR_TMP0) {
+ gen_op_arithc_T0_T1_cc[ot][op - OP_ADCL]();
+ gen_op_mov_reg_T0[ot][d]();
+ } else {
+ gen_op_arithc_mem_T0_T1_cc[ot][op - OP_ADCL]();
+ }
+ s1->cc_op = CC_OP_DYNAMIC;
+ goto the_end;
+ case OP_ADDL:
+ gen_op_addl_T0_T1();
+ s1->cc_op = CC_OP_ADDB + ot;
+ gen_update_cc = gen_op_update2_cc;
+ break;
+ case OP_SUBL:
+ gen_op_subl_T0_T1();
+ s1->cc_op = CC_OP_SUBB + ot;
+ gen_update_cc = gen_op_update2_cc;
+ break;
+ default:
+ case OP_ANDL:
+ case OP_ORL:
+ case OP_XORL:
+ gen_op_arith_T0_T1_cc[op]();
+ s1->cc_op = CC_OP_LOGICB + ot;
+ gen_update_cc = gen_op_update1_cc;
+ break;
+ case OP_CMPL:
+ gen_op_cmpl_T0_T1_cc();
+ s1->cc_op = CC_OP_SUBB + ot;
+ gen_update_cc = NULL;
+ break;
+ }
+ if (op != OP_CMPL) {
+ if (d != OR_TMP0)
+ gen_op_mov_reg_T0[ot][d]();
+ else
+ gen_op_st_T0_A0[ot + s1->mem_index]();
+ }
+ /* the flags update must happen after the memory write (precise
+ exception support) */
+ if (gen_update_cc)
+ gen_update_cc();
+ the_end: ;
+}
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_inc(DisasContext *s1, int ot, int d, int c)
+{
+ if (d != OR_TMP0)
+ gen_op_mov_TN_reg[ot][0][d]();
+ else
+ gen_op_ld_T0_A0[ot + s1->mem_index]();
+ if (s1->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s1->cc_op);
+ if (c > 0) {
+ gen_op_incl_T0();
+ s1->cc_op = CC_OP_INCB + ot;
+ } else {
+ gen_op_decl_T0();
+ s1->cc_op = CC_OP_DECB + ot;
+ }
+ if (d != OR_TMP0)
+ gen_op_mov_reg_T0[ot][d]();
+ else
+ gen_op_st_T0_A0[ot + s1->mem_index]();
+ gen_op_update_inc_cc();
+}
+
+static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
+{
+ if (d != OR_TMP0)
+ gen_op_mov_TN_reg[ot][0][d]();
+ else
+ gen_op_ld_T0_A0[ot + s1->mem_index]();
+ if (s != OR_TMP1)
+ gen_op_mov_TN_reg[ot][1][s]();
+ /* for zero counts, flags are not updated, so must do it dynamically */
+ if (s1->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s1->cc_op);
+
+ if (d != OR_TMP0)
+ gen_op_shift_T0_T1_cc[ot][op]();
+ else
+ gen_op_shift_mem_T0_T1_cc[ot][op]();
+ if (d != OR_TMP0)
+ gen_op_mov_reg_T0[ot][d]();
+ s1->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
+}
+
+static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
+{
+ /* currently not optimized */
+ gen_op_movl_T1_im(c);
+ gen_shift(s1, op, ot, d, OR_TMP1);
+}
+
+static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
+{
+ int havesib;
+ int base, disp;
+ int index;
+ int scale;
+ int opreg;
+ int mod, rm, code, override, must_add_seg;
+
+ override = s->override;
+ must_add_seg = s->addseg;
+ if (override >= 0)
+ must_add_seg = 1;
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+
+ if (s->aflag) {
+
+ havesib = 0;
+ base = rm;
+ index = 0;
+ scale = 0;
+
+ if (base == 4) {
+ havesib = 1;
+ code = ldub(s->pc++);
+ scale = (code >> 6) & 3;
+ index = (code >> 3) & 7;
+ base = code & 7;
+ }
+
+ switch (mod) {
+ case 0:
+ if (base == 5) {
+ base = -1;
+ disp = ldl(s->pc);
+ s->pc += 4;
+ } else {
+ disp = 0;
+ }
+ break;
+ case 1:
+ disp = (int8_t)ldub(s->pc++);
+ break;
+ default:
+ case 2:
+ disp = ldl(s->pc);
+ s->pc += 4;
+ break;
+ }
+
+ if (base >= 0) {
+ /* for correct popl handling with esp */
+ if (base == 4 && s->popl_esp_hack)
+ disp += s->popl_esp_hack;
+ gen_op_movl_A0_reg[base]();
+ if (disp != 0)
+ gen_op_addl_A0_im(disp);
+ } else {
+ gen_op_movl_A0_im(disp);
+ }
+ /* XXX: index == 4 is always invalid */
+ if (havesib && (index != 4 || scale != 0)) {
+ gen_op_addl_A0_reg_sN[scale][index]();
+ }
+ if (must_add_seg) {
+ if (override < 0) {
+ if (base == R_EBP || base == R_ESP)
+ override = R_SS;
+ else
+ override = R_DS;
+ }
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
+ } else {
+ switch (mod) {
+ case 0:
+ if (rm == 6) {
+ disp = lduw(s->pc);
+ s->pc += 2;
+ gen_op_movl_A0_im(disp);
+ rm = 0; /* avoid SS override */
+ goto no_rm;
+ } else {
+ disp = 0;
+ }
+ break;
+ case 1:
+ disp = (int8_t)ldub(s->pc++);
+ break;
+ default:
+ case 2:
+ disp = lduw(s->pc);
+ s->pc += 2;
+ break;
+ }
+ switch(rm) {
+ case 0:
+ gen_op_movl_A0_reg[R_EBX]();
+ gen_op_addl_A0_reg_sN[0][R_ESI]();
+ break;
+ case 1:
+ gen_op_movl_A0_reg[R_EBX]();
+ gen_op_addl_A0_reg_sN[0][R_EDI]();
+ break;
+ case 2:
+ gen_op_movl_A0_reg[R_EBP]();
+ gen_op_addl_A0_reg_sN[0][R_ESI]();
+ break;
+ case 3:
+ gen_op_movl_A0_reg[R_EBP]();
+ gen_op_addl_A0_reg_sN[0][R_EDI]();
+ break;
+ case 4:
+ gen_op_movl_A0_reg[R_ESI]();
+ break;
+ case 5:
+ gen_op_movl_A0_reg[R_EDI]();
+ break;
+ case 6:
+ gen_op_movl_A0_reg[R_EBP]();
+ break;
+ default:
+ case 7:
+ gen_op_movl_A0_reg[R_EBX]();
+ break;
+ }
+ if (disp != 0)
+ gen_op_addl_A0_im(disp);
+ gen_op_andl_A0_ffff();
+ no_rm:
+ if (must_add_seg) {
+ if (override < 0) {
+ if (rm == 2 || rm == 3 || rm == 6)
+ override = R_SS;
+ else
+ override = R_DS;
+ }
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
+ }
+
+ opreg = OR_A0;
+ disp = 0;
+ *reg_ptr = opreg;
+ *offset_ptr = disp;
+}
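+
+/* Example decode for the hypothetical instruction
+ "mov eax, [ebx + ecx*4 + 0x10]" (modrm 0x44, sib 0x8b, disp8 0x10):
+ mod == 1 and rm == 4, so a SIB byte follows; it gives scale == 2,
+ index == 1 (ECX) and base == 3 (EBX), with disp == 16. The code above
+ then emits gen_op_movl_A0_reg[R_EBX], gen_op_addl_A0_im(16) and
+ gen_op_addl_A0_reg_sN[2][R_ECX], plus gen_op_addl_A0_seg() for the DS
+ base when a segment base must be added. */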
+
+/* generate modrm memory load or store of 'reg'. T0 is used as the value
+ holder when reg == OR_TMP0 */
+static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
+{
+ int mod, rm, opreg, disp;
+
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ if (mod == 3) {
+ if (is_store) {
+ if (reg != OR_TMP0)
+ gen_op_mov_TN_reg[ot][0][reg]();
+ gen_op_mov_reg_T0[ot][rm]();
+ } else {
+ gen_op_mov_TN_reg[ot][0][rm]();
+ if (reg != OR_TMP0)
+ gen_op_mov_reg_T0[ot][reg]();
+ }
+ } else {
+ gen_lea_modrm(s, modrm, &opreg, &disp);
+ if (is_store) {
+ if (reg != OR_TMP0)
+ gen_op_mov_TN_reg[ot][0][reg]();
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ if (reg != OR_TMP0)
+ gen_op_mov_reg_T0[ot][reg]();
+ }
+ }
+}
+
+static inline uint32_t insn_get(DisasContext *s, int ot)
+{
+ uint32_t ret;
+
+ switch(ot) {
+ case OT_BYTE:
+ ret = ldub(s->pc);
+ s->pc++;
+ break;
+ case OT_WORD:
+ ret = lduw(s->pc);
+ s->pc += 2;
+ break;
+ default:
+ case OT_LONG:
+ ret = ldl(s->pc);
+ s->pc += 4;
+ break;
+ }
+ return ret;
+}
+
+static inline void gen_jcc(DisasContext *s, int b, int val, int next_eip)
+{
+ TranslationBlock *tb;
+ int inv, jcc_op;
+ GenOpFunc3 *func;
+
+ inv = b & 1;
+ jcc_op = (b >> 1) & 7;
+
+ if (s->jmp_opt) {
+ switch(s->cc_op) {
+ /* we optimize the cmp/jcc case */
+ case CC_OP_SUBB:
+ case CC_OP_SUBW:
+ case CC_OP_SUBL:
+ func = gen_jcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];
+ break;
+
+ /* some jumps are easy to compute */
+ case CC_OP_ADDB:
+ case CC_OP_ADDW:
+ case CC_OP_ADDL:
+ case CC_OP_ADCB:
+ case CC_OP_ADCW:
+ case CC_OP_ADCL:
+ case CC_OP_SBBB:
+ case CC_OP_SBBW:
+ case CC_OP_SBBL:
+ case CC_OP_LOGICB:
+ case CC_OP_LOGICW:
+ case CC_OP_LOGICL:
+ case CC_OP_INCB:
+ case CC_OP_INCW:
+ case CC_OP_INCL:
+ case CC_OP_DECB:
+ case CC_OP_DECW:
+ case CC_OP_DECL:
+ case CC_OP_SHLB:
+ case CC_OP_SHLW:
+ case CC_OP_SHLL:
+ case CC_OP_SARB:
+ case CC_OP_SARW:
+ case CC_OP_SARL:
+ switch(jcc_op) {
+ case JCC_Z:
+ func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 3][jcc_op];
+ break;
+ case JCC_S:
+ func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 3][jcc_op];
+ break;
+ default:
+ func = NULL;
+ break;
+ }
+ break;
+ default:
+ func = NULL;
+ break;
+ }
+
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+
+ if (!func) {
+ gen_setcc_slow[jcc_op]();
+ func = gen_op_jcc;
+ }
+
+ tb = s->tb;
+ if (!inv) {
+ func((long)tb, val, next_eip);
+ } else {
+ func((long)tb, next_eip, val);
+ }
+ s->is_jmp = 3;
+ } else {
+ if (s->cc_op != CC_OP_DYNAMIC) {
+ gen_op_set_cc_op(s->cc_op);
+ s->cc_op = CC_OP_DYNAMIC;
+ }
+ gen_setcc_slow[jcc_op]();
+ if (!inv) {
+ gen_op_jcc_im(val, next_eip);
+ } else {
+ gen_op_jcc_im(next_eip, val);
+ }
+ gen_eob(s);
+ }
+}
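+
+/* The fast path above exploits the lazy flag state: when the last flag
+ setter was a subtract/compare (CC_OP_SUBx), gen_jcc_sub recovers the
+ operands from CC_DST + CC_SRC and branches on them directly, never
+ materializing EFLAGS; any other CC_OP falls back to gen_setcc_slow[]
+ computing the condition into T0, followed by the generic gen_op_jcc. */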
+
+static void gen_setcc(DisasContext *s, int b)
+{
+ int inv, jcc_op;
+ GenOpFunc *func;
+
+ inv = b & 1;
+ jcc_op = (b >> 1) & 7;
+ switch(s->cc_op) {
+ /* we optimize the cmp/jcc case */
+ case CC_OP_SUBB:
+ case CC_OP_SUBW:
+ case CC_OP_SUBL:
+ func = gen_setcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];
+ if (!func)
+ goto slow_jcc;
+ break;
+
+ /* some jumps are easy to compute */
+ case CC_OP_ADDB:
+ case CC_OP_ADDW:
+ case CC_OP_ADDL:
+ case CC_OP_LOGICB:
+ case CC_OP_LOGICW:
+ case CC_OP_LOGICL:
+ case CC_OP_INCB:
+ case CC_OP_INCW:
+ case CC_OP_INCL:
+ case CC_OP_DECB:
+ case CC_OP_DECW:
+ case CC_OP_DECL:
+ case CC_OP_SHLB:
+ case CC_OP_SHLW:
+ case CC_OP_SHLL:
+ switch(jcc_op) {
+ case JCC_Z:
+ func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 3][jcc_op];
+ break;
+ case JCC_S:
+ func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 3][jcc_op];
+ break;
+ default:
+ goto slow_jcc;
+ }
+ break;
+ default:
+ slow_jcc:
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ func = gen_setcc_slow[jcc_op];
+ break;
+ }
+ func();
+ if (inv) {
+ gen_op_xor_T0_1();
+ }
+}
+
+/* move T0 to seg_reg and compute if the CPU state may change. Never
+ call this function with seg_reg == R_CS */
+static void gen_movl_seg_T0(DisasContext *s, int seg_reg, unsigned int cur_eip)
+{
+ if (s->pe && !s->vm86)
+ gen_op_movl_seg_T0(seg_reg, cur_eip);
+ else
+ gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
+ /* abort translation because the register may have a non zero base
+ or because ss32 may change. For R_SS, translation must always
+ stop as a special handling must be done to disable hardware
+ interrupts for the next instruction */
+ if (seg_reg == R_SS || (!s->addseg && seg_reg < R_FS))
+ s->is_jmp = 3;
+}
+
+/* generate a push. It depends on ss32, addseg and dflag */
+static void gen_push_T0(DisasContext *s)
+{
+ if (s->ss32) {
+ if (!s->addseg) {
+ if (s->dflag)
+ gen_op_pushl_T0();
+ else
+ gen_op_pushw_T0();
+ } else {
+ if (s->dflag)
+ gen_op_pushl_ss32_T0();
+ else
+ gen_op_pushw_ss32_T0();
+ }
+ } else {
+ if (s->dflag)
+ gen_op_pushl_ss16_T0();
+ else
+ gen_op_pushw_ss16_T0();
+ }
+}
+
+/* two step pop is necessary for precise exceptions */
+static void gen_pop_T0(DisasContext *s)
+{
+ if (s->ss32) {
+ if (!s->addseg) {
+ if (s->dflag)
+ gen_op_popl_T0();
+ else
+ gen_op_popw_T0();
+ } else {
+ if (s->dflag)
+ gen_op_popl_ss32_T0();
+ else
+ gen_op_popw_ss32_T0();
+ }
+ } else {
+ if (s->dflag)
+ gen_op_popl_ss16_T0();
+ else
+ gen_op_popw_ss16_T0();
+ }
+}
+
+static inline void gen_stack_update(DisasContext *s, int addend)
+{
+ if (s->ss32) {
+ if (addend == 2)
+ gen_op_addl_ESP_2();
+ else if (addend == 4)
+ gen_op_addl_ESP_4();
+ else
+ gen_op_addl_ESP_im(addend);
+ } else {
+ if (addend == 2)
+ gen_op_addw_ESP_2();
+ else if (addend == 4)
+ gen_op_addw_ESP_4();
+ else
+ gen_op_addw_ESP_im(addend);
+ }
+}
+
+static void gen_pop_update(DisasContext *s)
+{
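+ /* a pop discards 2 or 4 bytes depending on the operand size */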
+ gen_stack_update(s, 2 << s->dflag);
+}
+
+static void gen_stack_A0(DisasContext *s)
+{
+ gen_op_movl_A0_ESP();
+ if (!s->ss32)
+ gen_op_andl_A0_ffff();
+ gen_op_movl_T1_A0();
+ if (s->addseg)
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+}
+
+/* NOTE: wrap around in 16-bit mode is not fully handled */
+static void gen_pusha(DisasContext *s)
+{
+ int i;
+ gen_op_movl_A0_ESP();
+ gen_op_addl_A0_im(-16 << s->dflag);
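+ /* A0 now points to the bottom of the 8-register save area
+ (8 * 2 or 8 * 4 bytes below ESP) */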
+ if (!s->ss32)
+ gen_op_andl_A0_ffff();
+ gen_op_movl_T1_A0();
+ if (s->addseg)
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+ for(i = 0; i < 8; i++) {
+ gen_op_mov_TN_reg[OT_LONG][0][7 - i]();
+ gen_op_st_T0_A0[OT_WORD + s->dflag + s->mem_index]();
+ gen_op_addl_A0_im(2 << s->dflag);
+ }
+ gen_op_mov_reg_T1[OT_WORD + s->dflag][R_ESP]();
+}
+
+/* NOTE: wrap around in 16-bit mode is not fully handled */
+static void gen_popa(DisasContext *s)
+{
+ int i;
+ gen_op_movl_A0_ESP();
+ if (!s->ss32)
+ gen_op_andl_A0_ffff();
+ gen_op_movl_T1_A0();
+ gen_op_addl_T1_im(16 << s->dflag);
+ if (s->addseg)
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+ for(i = 0; i < 8; i++) {
+ /* ESP is not reloaded */
+ if (i != 3) {
+ gen_op_ld_T0_A0[OT_WORD + s->dflag + s->mem_index]();
+ gen_op_mov_reg_T0[OT_WORD + s->dflag][7 - i]();
+ }
+ gen_op_addl_A0_im(2 << s->dflag);
+ }
+ gen_op_mov_reg_T1[OT_WORD + s->dflag][R_ESP]();
+}
+
+/* NOTE: wrap around in 16-bit mode is not fully handled */
+/* XXX: check this */
+static void gen_enter(DisasContext *s, int esp_addend, int level)
+{
+ int ot, level1, addend, opsize;
+
+ ot = s->dflag + OT_WORD;
+ level &= 0x1f;
+ level1 = level;
+ opsize = 2 << s->dflag;
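+ /* ENTER: push EBP, copy the enclosing frame pointers when level > 0,
+ push the new frame pointer, then reserve esp_addend bytes of
+ locals; opsize is the stack item size (2 or 4 bytes) */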
+
+ gen_op_movl_A0_ESP();
+ gen_op_addl_A0_im(-opsize);
+ if (!s->ss32)
+ gen_op_andl_A0_ffff();
+ gen_op_movl_T1_A0();
+ if (s->addseg)
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
+ /* push bp */
+ gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ if (level) {
+ while (level--) {
+ gen_op_addl_A0_im(-opsize);
+ gen_op_addl_T0_im(-opsize);
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ }
+ gen_op_addl_A0_im(-opsize);
+ /* XXX: add st_T1_A0 ? */
+ gen_op_movl_T0_T1();
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ }
+ gen_op_mov_reg_T1[ot][R_EBP]();
+ addend = -esp_addend;
+ if (level1)
+ addend -= opsize * (level1 + 1);
+ gen_op_addl_T1_im(addend);
+ gen_op_mov_reg_T1[ot][R_ESP]();
+}
+
+static void gen_exception(DisasContext *s, int trapno, unsigned int cur_eip)
+{
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(cur_eip);
+ gen_op_raise_exception(trapno);
+ s->is_jmp = 3;
+}
+
+/* an interrupt is different from an exception because of the
+ privilege checks */
+static void gen_interrupt(DisasContext *s, int intno,
+ unsigned int cur_eip, unsigned int next_eip)
+{
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(cur_eip);
+ gen_op_raise_interrupt(intno, next_eip);
+ s->is_jmp = 3;
+}
+
+static void gen_debug(DisasContext *s, unsigned int cur_eip)
+{
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(cur_eip);
+ gen_op_debug();
+ s->is_jmp = 3;
+}
+
+/* generate a generic end of block. Trace exception is also generated
+ if needed */
+static void gen_eob(DisasContext *s)
+{
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ if (s->tf) {
+ gen_op_raise_exception(EXCP01_SSTP);
+ } else {
+ gen_op_movl_T0_0();
+ gen_op_exit_tb();
+ }
+ s->is_jmp = 3;
+}
+
+/* generate a jump to eip. No segment change must happen before as a
+ direct call to the next block may occur */
+static void gen_jmp(DisasContext *s, unsigned int eip)
+{
+ TranslationBlock *tb = s->tb;
+
+ if (s->jmp_opt) {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp((long)tb, eip);
+ s->is_jmp = 3;
+ } else {
+ gen_op_jmp_im(eip);
+ gen_eob(s);
+ }
+}
+
+/* convert one instruction. s->is_jmp is set if the translation must
+ be stopped. Return the next pc value */
+static uint8_t *disas_insn(DisasContext *s, uint8_t *pc_start)
+{
+ int b, prefixes, aflag, dflag;
+ int shift, ot;
+ int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
+ unsigned int next_eip;
+
+ s->pc = pc_start;
+ prefixes = 0;
+ aflag = s->code32;
+ dflag = s->code32;
+ s->override = -1;
+ next_byte:
+ b = ldub(s->pc);
+ s->pc++;
+ /* check prefixes */
+ switch (b) {
+ case 0xf3:
+ prefixes |= PREFIX_REPZ;
+ goto next_byte;
+ case 0xf2:
+ prefixes |= PREFIX_REPNZ;
+ goto next_byte;
+ case 0xf0:
+ prefixes |= PREFIX_LOCK;
+ goto next_byte;
+ case 0x2e:
+ s->override = R_CS;
+ goto next_byte;
+ case 0x36:
+ s->override = R_SS;
+ goto next_byte;
+ case 0x3e:
+ s->override = R_DS;
+ goto next_byte;
+ case 0x26:
+ s->override = R_ES;
+ goto next_byte;
+ case 0x64:
+ s->override = R_FS;
+ goto next_byte;
+ case 0x65:
+ s->override = R_GS;
+ goto next_byte;
+ case 0x66:
+ prefixes |= PREFIX_DATA;
+ goto next_byte;
+ case 0x67:
+ prefixes |= PREFIX_ADR;
+ goto next_byte;
+ }
+
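+ /* the 0x66/0x67 prefixes toggle the operand/address size relative to
+ the code segment default */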
+ if (prefixes & PREFIX_DATA)
+ dflag ^= 1;
+ if (prefixes & PREFIX_ADR)
+ aflag ^= 1;
+
+ s->prefix = prefixes;
+ s->aflag = aflag;
+ s->dflag = dflag;
+
+ /* lock generation */
+ if (prefixes & PREFIX_LOCK)
+ gen_op_lock();
+
+ /* now check op code */
+ reswitch:
+ switch(b) {
+ case 0x0f:
+ /**************************/
+ /* extended op code */
+ b = ldub(s->pc++) | 0x100;
+ goto reswitch;
+
+ /**************************/
+ /* arith & logic */
+ case 0x00 ... 0x05:
+ case 0x08 ... 0x0d:
+ case 0x10 ... 0x15:
+ case 0x18 ... 0x1d:
+ case 0x20 ... 0x25:
+ case 0x28 ... 0x2d:
+ case 0x30 ... 0x35:
+ case 0x38 ... 0x3d:
+ {
+ int op, f, val;
+ op = (b >> 3) & 7;
+ f = (b >> 1) & 3;
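+ /* bits 5..3 select the ALU op (ADD..CMP); bits 2..1 select the
+ encoding form: 0 = Ev,Gv, 1 = Gv,Ev, 2 = A,Iv */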
+
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ switch(f) {
+ case 0: /* OP Ev, Gv */
+ modrm = ldub(s->pc++);
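+ /* ModRM byte: mod = bits 7..6, reg = bits 5..3, rm = bits 2..0 */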
+ reg = ((modrm >> 3) & 7);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ opreg = OR_TMP0;
+ } else if (op == OP_XORL && rm == reg) {
+ xor_zero:
+ /* xor reg, reg optimisation */
+ gen_op_movl_T0_0();
+ s->cc_op = CC_OP_LOGICB + ot;
+ gen_op_mov_reg_T0[ot][reg]();
+ gen_op_update1_cc();
+ break;
+ } else {
+ opreg = rm;
+ }
+ gen_op_mov_TN_reg[ot][1][reg]();
+ gen_op(s, op, ot, opreg);
+ break;
+ case 1: /* OP Gv, Ev */
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7);
+ rm = modrm & 7;
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ } else if (op == OP_XORL && rm == reg) {
+ goto xor_zero;
+ } else {
+ gen_op_mov_TN_reg[ot][1][rm]();
+ }
+ gen_op(s, op, ot, reg);
+ break;
+ case 2: /* OP A, Iv */
+ val = insn_get(s, ot);
+ gen_op_movl_T1_im(val);
+ gen_op(s, op, ot, OR_EAX);
+ break;
+ }
+ }
+ break;
+
+ case 0x80: /* GRP1 */
+ case 0x81:
+ case 0x83:
+ {
+ int val;
+
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ op = (modrm >> 3) & 7;
+
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ opreg = OR_TMP0;
+ } else {
+ opreg = rm + OR_EAX;
+ }
+
+ switch(b) {
+ default:
+ case 0x80:
+ case 0x81:
+ val = insn_get(s, ot);
+ break;
+ case 0x83:
+ val = (int8_t)insn_get(s, OT_BYTE);
+ break;
+ }
+ gen_op_movl_T1_im(val);
+ gen_op(s, op, ot, opreg);
+ }
+ break;
+
+ /**************************/
+ /* inc, dec, and other misc arith */
+ case 0x40 ... 0x47: /* inc Gv */
+ ot = dflag ? OT_LONG : OT_WORD;
+ gen_inc(s, ot, OR_EAX + (b & 7), 1);
+ break;
+ case 0x48 ... 0x4f: /* dec Gv */
+ ot = dflag ? OT_LONG : OT_WORD;
+ gen_inc(s, ot, OR_EAX + (b & 7), -1);
+ break;
+ case 0xf6: /* GRP3 */
+ case 0xf7:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ op = (modrm >> 3) & 7;
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_mov_TN_reg[ot][0][rm]();
+ }
+
+ switch(op) {
+ case 0: /* test */
+ val = insn_get(s, ot);
+ gen_op_movl_T1_im(val);
+ gen_op_testl_T0_T1_cc();
+ s->cc_op = CC_OP_LOGICB + ot;
+ break;
+ case 2: /* not */
+ gen_op_notl_T0();
+ if (mod != 3) {
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_mov_reg_T0[ot][rm]();
+ }
+ break;
+ case 3: /* neg */
+ gen_op_negl_T0();
+ if (mod != 3) {
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_mov_reg_T0[ot][rm]();
+ }
+ gen_op_update_neg_cc();
+ s->cc_op = CC_OP_SUBB + ot;
+ break;
+ case 4: /* mul */
+ switch(ot) {
+ case OT_BYTE:
+ gen_op_mulb_AL_T0();
+ break;
+ case OT_WORD:
+ gen_op_mulw_AX_T0();
+ break;
+ default:
+ case OT_LONG:
+ gen_op_mull_EAX_T0();
+ break;
+ }
+ s->cc_op = CC_OP_MUL;
+ break;
+ case 5: /* imul */
+ switch(ot) {
+ case OT_BYTE:
+ gen_op_imulb_AL_T0();
+ break;
+ case OT_WORD:
+ gen_op_imulw_AX_T0();
+ break;
+ default:
+ case OT_LONG:
+ gen_op_imull_EAX_T0();
+ break;
+ }
+ s->cc_op = CC_OP_MUL;
+ break;
+ case 6: /* div */
+ switch(ot) {
+ case OT_BYTE:
+ gen_op_divb_AL_T0(pc_start - s->cs_base);
+ break;
+ case OT_WORD:
+ gen_op_divw_AX_T0(pc_start - s->cs_base);
+ break;
+ default:
+ case OT_LONG:
+ gen_op_divl_EAX_T0(pc_start - s->cs_base);
+ break;
+ }
+ break;
+ case 7: /* idiv */
+ switch(ot) {
+ case OT_BYTE:
+ gen_op_idivb_AL_T0(pc_start - s->cs_base);
+ break;
+ case OT_WORD:
+ gen_op_idivw_AX_T0(pc_start - s->cs_base);
+ break;
+ default:
+ case OT_LONG:
+ gen_op_idivl_EAX_T0(pc_start - s->cs_base);
+ break;
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+
+ case 0xfe: /* GRP4 */
+ case 0xff: /* GRP5 */
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ op = (modrm >> 3) & 7;
+ if (op >= 2 && b == 0xfe) {
+ goto illegal_op;
+ }
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ if (op >= 2 && op != 3 && op != 5)
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_mov_TN_reg[ot][0][rm]();
+ }
+
+ switch(op) {
+ case 0: /* inc Ev */
+ if (mod != 3)
+ opreg = OR_TMP0;
+ else
+ opreg = rm;
+ gen_inc(s, ot, opreg, 1);
+ break;
+ case 1: /* dec Ev */
+ if (mod != 3)
+ opreg = OR_TMP0;
+ else
+ opreg = rm;
+ gen_inc(s, ot, opreg, -1);
+ break;
+ case 2: /* call Ev */
+ /* XXX: optimize the memory case (the and is not necessary) */
+ if (s->dflag == 0)
+ gen_op_andl_T0_ffff();
+ gen_op_jmp_T0();
+ next_eip = s->pc - s->cs_base;
+ gen_op_movl_T0_im(next_eip);
+ gen_push_T0(s);
+ gen_eob(s);
+ break;
+ case 3: /* lcall Ev */
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ gen_op_addl_A0_im(1 << (ot - OT_WORD + 1));
+ gen_op_ld_T0_A0[OT_WORD + s->mem_index]();
+ do_lcall:
+ if (s->pe && !s->vm86) {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(pc_start - s->cs_base);
+ gen_op_lcall_protected_T0_T1(dflag, s->pc - s->cs_base);
+ } else {
+ gen_op_lcall_real_T0_T1(dflag, s->pc - s->cs_base);
+ }
+ gen_eob(s);
+ break;
+ case 4: /* jmp Ev */
+ if (s->dflag == 0)
+ gen_op_andl_T0_ffff();
+ gen_op_jmp_T0();
+ gen_eob(s);
+ break;
+ case 5: /* ljmp Ev */
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ gen_op_addl_A0_im(1 << (ot - OT_WORD + 1));
+ gen_op_lduw_T0_A0();
+ do_ljmp:
+ if (s->pe && !s->vm86) {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(pc_start - s->cs_base);
+ gen_op_ljmp_protected_T0_T1();
+ } else {
+ gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+ gen_op_movl_T0_T1();
+ gen_op_jmp_T0();
+ }
+ gen_eob(s);
+ break;
+ case 6: /* push Ev */
+ gen_push_T0(s);
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+
+ case 0x84: /* test Ev, Gv */
+ case 0x85:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ reg = (modrm >> 3) & 7;
+
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_op_mov_TN_reg[ot][1][reg + OR_EAX]();
+ gen_op_testl_T0_T1_cc();
+ s->cc_op = CC_OP_LOGICB + ot;
+ break;
+
+ case 0xa8: /* test eAX, Iv */
+ case 0xa9:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ val = insn_get(s, ot);
+
+ gen_op_mov_TN_reg[ot][0][OR_EAX]();
+ gen_op_movl_T1_im(val);
+ gen_op_testl_T0_T1_cc();
+ s->cc_op = CC_OP_LOGICB + ot;
+ break;
+
+ case 0x98: /* CWDE/CBW */
+ if (dflag)
+ gen_op_movswl_EAX_AX();
+ else
+ gen_op_movsbw_AX_AL();
+ break;
+ case 0x99: /* CDQ/CWD */
+ if (dflag)
+ gen_op_movslq_EDX_EAX();
+ else
+ gen_op_movswl_DX_AX();
+ break;
+ case 0x1af: /* imul Gv, Ev */
+ case 0x69: /* imul Gv, Ev, Iv */
+ case 0x6b: /* imul Gv, Ev, Ib */
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = ((modrm >> 3) & 7) + OR_EAX;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ if (b == 0x69) {
+ val = insn_get(s, ot);
+ gen_op_movl_T1_im(val);
+ } else if (b == 0x6b) {
+ val = (int8_t)insn_get(s, OT_BYTE);
+ gen_op_movl_T1_im(val);
+ } else {
+ gen_op_mov_TN_reg[ot][1][reg]();
+ }
+
+ if (ot == OT_LONG) {
+ gen_op_imull_T0_T1();
+ } else {
+ gen_op_imulw_T0_T1();
+ }
+ gen_op_mov_reg_T0[ot][reg]();
+ s->cc_op = CC_OP_MUL;
+ break;
+ case 0x1c0:
+ case 0x1c1: /* xadd Ev, Gv */
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3) {
+ rm = modrm & 7;
+ gen_op_mov_TN_reg[ot][0][reg]();
+ gen_op_mov_TN_reg[ot][1][rm]();
+ gen_op_addl_T0_T1();
+ gen_op_mov_reg_T0[ot][rm]();
+ gen_op_mov_reg_T1[ot][reg]();
+ } else {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_mov_TN_reg[ot][0][reg]();
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ gen_op_addl_T0_T1();
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ gen_op_mov_reg_T1[ot][reg]();
+ }
+ gen_op_update2_cc();
+ s->cc_op = CC_OP_ADDB + ot;
+ break;
+ case 0x1b0:
+ case 0x1b1: /* cmpxchg Ev, Gv */
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ gen_op_mov_TN_reg[ot][1][reg]();
+ if (mod == 3) {
+ rm = modrm & 7;
+ gen_op_mov_TN_reg[ot][0][rm]();
+ gen_op_cmpxchg_T0_T1_EAX_cc[ot]();
+ gen_op_mov_reg_T0[ot][rm]();
+ } else {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ gen_op_cmpxchg_mem_T0_T1_EAX_cc[ot]();
+ }
+ s->cc_op = CC_OP_SUBB + ot;
+ break;
+ case 0x1c7: /* cmpxchg8b */
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_cmpxchg8b();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+
+ /**************************/
+ /* push/pop */
+ case 0x50 ... 0x57: /* push */
+ gen_op_mov_TN_reg[OT_LONG][0][b & 7]();
+ gen_push_T0(s);
+ break;
+ case 0x58 ... 0x5f: /* pop */
+ ot = dflag ? OT_LONG : OT_WORD;
+ gen_pop_T0(s);
+ gen_op_mov_reg_T0[ot][b & 7]();
+ gen_pop_update(s);
+ break;
+ case 0x60: /* pusha */
+ gen_pusha(s);
+ break;
+ case 0x61: /* popa */
+ gen_popa(s);
+ break;
+ case 0x68: /* push Iv */
+ case 0x6a:
+ ot = dflag ? OT_LONG : OT_WORD;
+ if (b == 0x68)
+ val = insn_get(s, ot);
+ else
+ val = (int8_t)insn_get(s, OT_BYTE);
+ gen_op_movl_T0_im(val);
+ gen_push_T0(s);
+ break;
+ case 0x8f: /* pop Ev */
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ gen_pop_T0(s);
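+ /* if the destination address is ESP-relative it must see ESP as
+ already adjusted by the pop; popl_esp_hack makes the address
+ computation compensate */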
+ s->popl_esp_hack = 2 << dflag;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ s->popl_esp_hack = 0;
+ gen_pop_update(s);
+ break;
+ case 0xc8: /* enter */
+ {
+ int level;
+ val = lduw(s->pc);
+ s->pc += 2;
+ level = ldub(s->pc++);
+ gen_enter(s, val, level);
+ }
+ break;
+ case 0xc9: /* leave */
+ /* XXX: exception not precise (ESP is updated before potential exception) */
+ if (s->ss32) {
+ gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
+ gen_op_mov_reg_T0[OT_LONG][R_ESP]();
+ } else {
+ gen_op_mov_TN_reg[OT_WORD][0][R_EBP]();
+ gen_op_mov_reg_T0[OT_WORD][R_ESP]();
+ }
+ gen_pop_T0(s);
+ ot = dflag ? OT_LONG : OT_WORD;
+ gen_op_mov_reg_T0[ot][R_EBP]();
+ gen_pop_update(s);
+ break;
+ case 0x06: /* push es */
+ case 0x0e: /* push cs */
+ case 0x16: /* push ss */
+ case 0x1e: /* push ds */
+ gen_op_movl_T0_seg(b >> 3);
+ gen_push_T0(s);
+ break;
+ case 0x1a0: /* push fs */
+ case 0x1a8: /* push gs */
+ gen_op_movl_T0_seg((b >> 3) & 7);
+ gen_push_T0(s);
+ break;
+ case 0x07: /* pop es */
+ case 0x17: /* pop ss */
+ case 0x1f: /* pop ds */
+ reg = b >> 3;
+ gen_pop_T0(s);
+ gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+ gen_pop_update(s);
+ if (reg == R_SS) {
+ /* if reg == SS, inhibit interrupts/trace */
+ gen_op_set_inhibit_irq();
+ s->tf = 0;
+ }
+ if (s->is_jmp) {
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+ case 0x1a1: /* pop fs */
+ case 0x1a9: /* pop gs */
+ gen_pop_T0(s);
+ gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
+ gen_pop_update(s);
+ if (s->is_jmp) {
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+
+ /**************************/
+ /* mov */
+ case 0x88:
+ case 0x89: /* mov Gv, Ev */
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+
+ /* generate a generic store */
+ gen_ldst_modrm(s, modrm, ot, OR_EAX + reg, 1);
+ break;
+ case 0xc6:
+ case 0xc7: /* mov Ev, Iv */
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ if (mod != 3)
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ val = insn_get(s, ot);
+ gen_op_movl_T0_im(val);
+ if (mod != 3)
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ else
+ gen_op_mov_reg_T0[ot][modrm & 7]();
+ break;
+ case 0x8a:
+ case 0x8b: /* mov Ev, Gv */
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_op_mov_reg_T0[ot][reg]();
+ break;
+ case 0x8e: /* mov seg, Gv */
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ if (reg >= 6 || reg == R_CS)
+ goto illegal_op;
+ gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+ if (reg == R_SS) {
+ /* if reg == SS, inhibit interrupts/trace */
+ gen_op_set_inhibit_irq();
+ s->tf = 0;
+ }
+ if (s->is_jmp) {
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+ case 0x8c: /* mov Gv, seg */
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (reg >= 6)
+ goto illegal_op;
+ gen_op_movl_T0_seg(reg);
+ ot = OT_WORD;
+ if (mod == 3 && dflag)
+ ot = OT_LONG;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ break;
+
+ case 0x1b6: /* movzbS Gv, Eb */
+ case 0x1b7: /* movzwS Gv, Ew */
+ case 0x1be: /* movsbS Gv, Eb */
+ case 0x1bf: /* movswS Gv, Ew */
+ {
+ int d_ot;
+ /* d_ot is the size of destination */
+ d_ot = dflag + OT_WORD;
+ /* ot is the size of source */
+ ot = (b & 1) + OT_BYTE;
+ modrm = ldub(s->pc++);
+ reg = ((modrm >> 3) & 7) + OR_EAX;
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+
+ if (mod == 3) {
+ gen_op_mov_TN_reg[ot][0][rm]();
+ switch(ot | (b & 8)) {
+ case OT_BYTE:
+ gen_op_movzbl_T0_T0();
+ break;
+ case OT_BYTE | 8:
+ gen_op_movsbl_T0_T0();
+ break;
+ case OT_WORD:
+ gen_op_movzwl_T0_T0();
+ break;
+ default:
+ case OT_WORD | 8:
+ gen_op_movswl_T0_T0();
+ break;
+ }
+ gen_op_mov_reg_T0[d_ot][reg]();
+ } else {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ if (b & 8) {
+ gen_op_lds_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_ldu_T0_A0[ot + s->mem_index]();
+ }
+ gen_op_mov_reg_T0[d_ot][reg]();
+ }
+ }
+ break;
+
+ case 0x8d: /* lea */
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ /* we must ensure that no segment is added */
+ s->override = -1;
+ val = s->addseg;
+ s->addseg = 0;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ s->addseg = val;
+ gen_op_mov_reg_A0[ot - OT_WORD][reg]();
+ break;
+
+ case 0xa0: /* mov EAX, Ov */
+ case 0xa1:
+ case 0xa2: /* mov Ov, EAX */
+ case 0xa3:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ if (s->aflag)
+ offset_addr = insn_get(s, OT_LONG);
+ else
+ offset_addr = insn_get(s, OT_WORD);
+ gen_op_movl_A0_im(offset_addr);
+ /* handle override */
+ {
+ int override, must_add_seg;
+ must_add_seg = s->addseg;
+ if (s->override >= 0) {
+ override = s->override;
+ must_add_seg = 1;
+ } else {
+ override = R_DS;
+ }
+ if (must_add_seg) {
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
+ }
+ if ((b & 2) == 0) {
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ gen_op_mov_reg_T0[ot][R_EAX]();
+ } else {
+ gen_op_mov_TN_reg[ot][0][R_EAX]();
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ }
+ break;
+ case 0xd7: /* xlat */
+ gen_op_movl_A0_reg[R_EBX]();
+ gen_op_addl_A0_AL();
+ if (s->aflag == 0)
+ gen_op_andl_A0_ffff();
+ /* handle override */
+ {
+ int override, must_add_seg;
+ must_add_seg = s->addseg;
+ if (s->override >= 0) {
+ override = s->override;
+ must_add_seg = 1;
+ } else {
+ override = R_DS;
+ }
+ if (must_add_seg) {
+ gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
+ }
+ }
+ gen_op_ldu_T0_A0[OT_BYTE + s->mem_index]();
+ gen_op_mov_reg_T0[OT_BYTE][R_EAX]();
+ break;
+ case 0xb0 ... 0xb7: /* mov R, Ib */
+ val = insn_get(s, OT_BYTE);
+ gen_op_movl_T0_im(val);
+ gen_op_mov_reg_T0[OT_BYTE][b & 7]();
+ break;
+ case 0xb8 ... 0xbf: /* mov R, Iv */
+ ot = dflag ? OT_LONG : OT_WORD;
+ val = insn_get(s, ot);
+ reg = OR_EAX + (b & 7);
+ gen_op_movl_T0_im(val);
+ gen_op_mov_reg_T0[ot][reg]();
+ break;
+
+ case 0x91 ... 0x97: /* xchg R, EAX */
+ ot = dflag ? OT_LONG : OT_WORD;
+ reg = b & 7;
+ rm = R_EAX;
+ goto do_xchg_reg;
+ case 0x86:
+ case 0x87: /* xchg Ev, Gv */
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3) {
+ rm = modrm & 7;
+ do_xchg_reg:
+ gen_op_mov_TN_reg[ot][0][reg]();
+ gen_op_mov_TN_reg[ot][1][rm]();
+ gen_op_mov_reg_T0[ot][rm]();
+ gen_op_mov_reg_T1[ot][reg]();
+ } else {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_mov_TN_reg[ot][0][reg]();
+ /* for xchg, lock is implicit */
+ if (!(prefixes & PREFIX_LOCK))
+ gen_op_lock();
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ if (!(prefixes & PREFIX_LOCK))
+ gen_op_unlock();
+ gen_op_mov_reg_T1[ot][reg]();
+ }
+ break;
+ case 0xc4: /* les Gv */
+ op = R_ES;
+ goto do_lxx;
+ case 0xc5: /* lds Gv */
+ op = R_DS;
+ goto do_lxx;
+ case 0x1b2: /* lss Gv */
+ op = R_SS;
+ goto do_lxx;
+ case 0x1b4: /* lfs Gv */
+ op = R_FS;
+ goto do_lxx;
+ case 0x1b5: /* lgs Gv */
+ op = R_GS;
+ do_lxx:
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ gen_op_addl_A0_im(1 << (ot - OT_WORD + 1));
+ /* load the segment first to handle exceptions properly */
+ gen_op_lduw_T0_A0();
+ gen_movl_seg_T0(s, op, pc_start - s->cs_base);
+ /* then put the data */
+ gen_op_mov_reg_T1[ot][reg]();
+ if (s->is_jmp) {
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+
+ /************************/
+ /* shifts */
+ case 0xc0:
+ case 0xc1:
+ /* shift Ev,Ib */
+ shift = 2;
+ grp2:
+ {
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ op = (modrm >> 3) & 7;
+
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ opreg = OR_TMP0;
+ } else {
+ opreg = rm + OR_EAX;
+ }
+
+ /* shift == 0: count in CL; shift == 1: count is 1; shift == 2: immediate count */
+ if (shift == 0) {
+ gen_shift(s, op, ot, opreg, OR_ECX);
+ } else {
+ if (shift == 2) {
+ shift = ldub(s->pc++);
+ }
+ gen_shifti(s, op, ot, opreg, shift);
+ }
+ }
+ break;
+ case 0xd0:
+ case 0xd1:
+ /* shift Ev,1 */
+ shift = 1;
+ goto grp2;
+ case 0xd2:
+ case 0xd3:
+ /* shift Ev,cl */
+ shift = 0;
+ goto grp2;
+
+ case 0x1a4: /* shld imm */
+ op = 0;
+ shift = 1;
+ goto do_shiftd;
+ case 0x1a5: /* shld cl */
+ op = 0;
+ shift = 0;
+ goto do_shiftd;
+ case 0x1ac: /* shrd imm */
+ op = 1;
+ shift = 1;
+ goto do_shiftd;
+ case 0x1ad: /* shrd cl */
+ op = 1;
+ shift = 0;
+ do_shiftd:
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ reg = (modrm >> 3) & 7;
+
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_mov_TN_reg[ot][0][rm]();
+ }
+ gen_op_mov_TN_reg[ot][1][reg]();
+
+ if (shift) {
+ val = ldub(s->pc++);
+ val &= 0x1f;
+ if (val) {
+ if (mod == 3)
+ gen_op_shiftd_T0_T1_im_cc[ot - OT_WORD][op](val);
+ else
+ gen_op_shiftd_mem_T0_T1_im_cc[ot - OT_WORD][op](val);
+ if (op == 0 && ot != OT_WORD)
+ s->cc_op = CC_OP_SHLB + ot;
+ else
+ s->cc_op = CC_OP_SARB + ot;
+ }
+ } else {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ if (mod == 3)
+ gen_op_shiftd_T0_T1_ECX_cc[ot - OT_WORD][op]();
+ else
+ gen_op_shiftd_mem_T0_T1_ECX_cc[ot - OT_WORD][op]();
+ s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
+ }
+ if (mod == 3) {
+ gen_op_mov_reg_T0[ot][rm]();
+ }
+ break;
+
+ /************************/
+ /* floats */
+ case 0xd8 ... 0xdf:
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ op = ((b & 7) << 3) | ((modrm >> 3) & 7);
+
+ if (mod != 3) {
+ /* memory op */
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ switch(op) {
+ case 0x00 ... 0x07: /* fxxxs */
+ case 0x10 ... 0x17: /* fixxxl */
+ case 0x20 ... 0x27: /* fxxxl */
+ case 0x30 ... 0x37: /* fixxx */
+ {
+ int op1;
+ op1 = op & 7;
+
+ switch(op >> 4) {
+ case 0:
+ gen_op_flds_FT0_A0();
+ break;
+ case 1:
+ gen_op_fildl_FT0_A0();
+ break;
+ case 2:
+ gen_op_fldl_FT0_A0();
+ break;
+ case 3:
+ default:
+ gen_op_fild_FT0_A0();
+ break;
+ }
+
+ gen_op_fp_arith_ST0_FT0[op1]();
+ if (op1 == 3) {
+ /* fcomp needs pop */
+ gen_op_fpop();
+ }
+ }
+ break;
+ case 0x08: /* flds */
+ case 0x0a: /* fsts */
+ case 0x0b: /* fstps */
+ case 0x18: /* fildl */
+ case 0x1a: /* fistl */
+ case 0x1b: /* fistpl */
+ case 0x28: /* fldl */
+ case 0x2a: /* fstl */
+ case 0x2b: /* fstpl */
+ case 0x38: /* filds */
+ case 0x3a: /* fists */
+ case 0x3b: /* fistps */
+
+ switch(op & 7) {
+ case 0:
+ switch(op >> 4) {
+ case 0:
+ gen_op_flds_ST0_A0();
+ break;
+ case 1:
+ gen_op_fildl_ST0_A0();
+ break;
+ case 2:
+ gen_op_fldl_ST0_A0();
+ break;
+ case 3:
+ default:
+ gen_op_fild_ST0_A0();
+ break;
+ }
+ break;
+ default:
+ switch(op >> 4) {
+ case 0:
+ gen_op_fsts_ST0_A0();
+ break;
+ case 1:
+ gen_op_fistl_ST0_A0();
+ break;
+ case 2:
+ gen_op_fstl_ST0_A0();
+ break;
+ case 3:
+ default:
+ gen_op_fist_ST0_A0();
+ break;
+ }
+ if ((op & 7) == 3)
+ gen_op_fpop();
+ break;
+ }
+ break;
+ case 0x0c: /* fldenv mem */
+ gen_op_fldenv_A0(s->dflag);
+ break;
+ case 0x0d: /* fldcw mem */
+ gen_op_fldcw_A0();
+ break;
+ case 0x0e: /* fnstenv mem */
+ gen_op_fnstenv_A0(s->dflag);
+ break;
+ case 0x0f: /* fnstcw mem */
+ gen_op_fnstcw_A0();
+ break;
+ case 0x1d: /* fldt mem */
+ gen_op_fldt_ST0_A0();
+ break;
+ case 0x1f: /* fstpt mem */
+ gen_op_fstt_ST0_A0();
+ gen_op_fpop();
+ break;
+ case 0x2c: /* frstor mem */
+ gen_op_frstor_A0(s->dflag);
+ break;
+ case 0x2e: /* fnsave mem */
+ gen_op_fnsave_A0(s->dflag);
+ break;
+ case 0x2f: /* fnstsw mem */
+ gen_op_fnstsw_A0();
+ break;
+ case 0x3c: /* fbld */
+ gen_op_fbld_ST0_A0();
+ break;
+ case 0x3e: /* fbstp */
+ gen_op_fbst_ST0_A0();
+ gen_op_fpop();
+ break;
+ case 0x3d: /* fildll */
+ gen_op_fildll_ST0_A0();
+ break;
+ case 0x3f: /* fistpll */
+ gen_op_fistll_ST0_A0();
+ gen_op_fpop();
+ break;
+ default:
+ goto illegal_op;
+ }
+ } else {
+ /* register float ops */
+ opreg = rm;
+
+ switch(op) {
+ case 0x08: /* fld sti */
+ gen_op_fpush();
+ gen_op_fmov_ST0_STN((opreg + 1) & 7);
+ break;
+ case 0x09: /* fxchg sti */
+ gen_op_fxchg_ST0_STN(opreg);
+ break;
+ case 0x0a: /* grp d9/2 */
+ switch(rm) {
+ case 0: /* fnop */
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x0c: /* grp d9/4 */
+ switch(rm) {
+ case 0: /* fchs */
+ gen_op_fchs_ST0();
+ break;
+ case 1: /* fabs */
+ gen_op_fabs_ST0();
+ break;
+ case 4: /* ftst */
+ gen_op_fldz_FT0();
+ gen_op_fcom_ST0_FT0();
+ break;
+ case 5: /* fxam */
+ gen_op_fxam_ST0();
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x0d: /* grp d9/5 */
+ {
+ switch(rm) {
+ case 0:
+ gen_op_fpush();
+ gen_op_fld1_ST0();
+ break;
+ case 1:
+ gen_op_fpush();
+ gen_op_fldl2t_ST0();
+ break;
+ case 2:
+ gen_op_fpush();
+ gen_op_fldl2e_ST0();
+ break;
+ case 3:
+ gen_op_fpush();
+ gen_op_fldpi_ST0();
+ break;
+ case 4:
+ gen_op_fpush();
+ gen_op_fldlg2_ST0();
+ break;
+ case 5:
+ gen_op_fpush();
+ gen_op_fldln2_ST0();
+ break;
+ case 6:
+ gen_op_fpush();
+ gen_op_fldz_ST0();
+ break;
+ default:
+ goto illegal_op;
+ }
+ }
+ break;
+ case 0x0e: /* grp d9/6 */
+ switch(rm) {
+ case 0: /* f2xm1 */
+ gen_op_f2xm1();
+ break;
+ case 1: /* fyl2x */
+ gen_op_fyl2x();
+ break;
+ case 2: /* fptan */
+ gen_op_fptan();
+ break;
+ case 3: /* fpatan */
+ gen_op_fpatan();
+ break;
+ case 4: /* fxtract */
+ gen_op_fxtract();
+ break;
+ case 5: /* fprem1 */
+ gen_op_fprem1();
+ break;
+ case 6: /* fdecstp */
+ gen_op_fdecstp();
+ break;
+ default:
+ case 7: /* fincstp */
+ gen_op_fincstp();
+ break;
+ }
+ break;
+ case 0x0f: /* grp d9/7 */
+ switch(rm) {
+ case 0: /* fprem */
+ gen_op_fprem();
+ break;
+ case 1: /* fyl2xp1 */
+ gen_op_fyl2xp1();
+ break;
+ case 2: /* fsqrt */
+ gen_op_fsqrt();
+ break;
+ case 3: /* fsincos */
+ gen_op_fsincos();
+ break;
+ case 4: /* frndint */
+ gen_op_frndint();
+ break;
+ case 5: /* fscale */
+ gen_op_fscale();
+ break;
+ case 6: /* fsin */
+ gen_op_fsin();
+ break;
+ default:
+ case 7: /* fcos */
+ gen_op_fcos();
+ break;
+ }
+ break;
+ case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
+ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
+ case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
+ {
+ int op1;
+
+ op1 = op & 7;
+ if (op >= 0x20) {
+ gen_op_fp_arith_STN_ST0[op1](opreg);
+ if (op >= 0x30)
+ gen_op_fpop();
+ } else {
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fp_arith_ST0_FT0[op1]();
+ }
+ }
+ break;
+ case 0x02: /* fcom */
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fcom_ST0_FT0();
+ break;
+ case 0x03: /* fcomp */
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fcom_ST0_FT0();
+ gen_op_fpop();
+ break;
+ case 0x15: /* da/5 */
+ switch(rm) {
+ case 1: /* fucompp */
+ gen_op_fmov_FT0_STN(1);
+ gen_op_fucom_ST0_FT0();
+ gen_op_fpop();
+ gen_op_fpop();
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x1c:
+ switch(rm) {
+ case 0: /* feni (287 only, just do nop here) */
+ break;
+ case 1: /* fdisi (287 only, just do nop here) */
+ break;
+ case 2: /* fclex */
+ gen_op_fclex();
+ break;
+ case 3: /* fninit */
+ gen_op_fninit();
+ break;
+ case 4: /* fsetpm (287 only, just do nop here) */
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x1d: /* fucomi */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fucomi_ST0_FT0();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0x1e: /* fcomi */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fcomi_ST0_FT0();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0x2a: /* fst sti */
+ gen_op_fmov_STN_ST0(opreg);
+ break;
+ case 0x2b: /* fstp sti */
+ gen_op_fmov_STN_ST0(opreg);
+ gen_op_fpop();
+ break;
+ case 0x2c: /* fucom st(i) */
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fucom_ST0_FT0();
+ break;
+ case 0x2d: /* fucomp st(i) */
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fucom_ST0_FT0();
+ gen_op_fpop();
+ break;
+ case 0x33: /* de/3 */
+ switch(rm) {
+ case 1: /* fcompp */
+ gen_op_fmov_FT0_STN(1);
+ gen_op_fcom_ST0_FT0();
+ gen_op_fpop();
+ gen_op_fpop();
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x3c: /* df/4 */
+ switch(rm) {
+ case 0:
+ gen_op_fnstsw_EAX();
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x3d: /* fucomip */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fucomi_ST0_FT0();
+ gen_op_fpop();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0x3e: /* fcomip */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_fmov_FT0_STN(opreg);
+ gen_op_fcomi_ST0_FT0();
+ gen_op_fpop();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ default:
+ goto illegal_op;
+ }
+ }
+ break;
+ /************************/
+ /* string ops */
+
+ case 0xa4: /* movsS */
+ case 0xa5:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_movs(s, ot);
+ }
+ break;
+
+ case 0xaa: /* stosS */
+ case 0xab:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_stos(s, ot);
+ }
+ break;
+ case 0xac: /* lodsS */
+ case 0xad:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_lods(s, ot);
+ }
+ break;
+ case 0xae: /* scasS */
+ case 0xaf:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ if (prefixes & PREFIX_REPNZ) {
+ gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+ } else if (prefixes & PREFIX_REPZ) {
+ gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+ } else {
+ gen_scas(s, ot);
+ s->cc_op = CC_OP_SUBB + ot;
+ }
+ break;
+
+ case 0xa6: /* cmpsS */
+ case 0xa7:
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ if (prefixes & PREFIX_REPNZ) {
+ gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+ } else if (prefixes & PREFIX_REPZ) {
+ gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+ } else {
+ gen_cmps(s, ot);
+ s->cc_op = CC_OP_SUBB + ot;
+ }
+ break;
+ case 0x6c: /* insS */
+ case 0x6d:
+ if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+ /* NOTE: even for (E)CX = 0 the exception is raised */
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_ins(s, ot);
+ }
+ }
+ break;
+ case 0x6e: /* outsS */
+ case 0x6f:
+ if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+ /* NOTE: even for (E)CX = 0 the exception is raised */
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_outs(s, ot);
+ }
+ }
+ break;
+
+ /************************/
+ /* port I/O */
+ case 0xe4:
+ case 0xe5:
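+ /* I/O privilege check: in protected mode IN/OUT fault unless
+ CPL <= IOPL; vm86 always takes the GPF path (no check in real mode) */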
+ if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ val = ldub(s->pc++);
+ gen_op_movl_T0_im(val);
+ gen_op_in[ot]();
+ gen_op_mov_reg_T1[ot][R_EAX]();
+ }
+ break;
+ case 0xe6:
+ case 0xe7:
+ if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ val = ldub(s->pc++);
+ gen_op_movl_T0_im(val);
+ gen_op_mov_TN_reg[ot][1][R_EAX]();
+ gen_op_out[ot]();
+ }
+ break;
+ case 0xec:
+ case 0xed:
+ if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ gen_op_mov_TN_reg[OT_WORD][0][R_EDX]();
+ gen_op_in[ot]();
+ gen_op_mov_reg_T1[ot][R_EAX]();
+ }
+ break;
+ case 0xee:
+ case 0xef:
+ if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if ((b & 1) == 0)
+ ot = OT_BYTE;
+ else
+ ot = dflag ? OT_LONG : OT_WORD;
+ gen_op_mov_TN_reg[OT_WORD][0][R_EDX]();
+ gen_op_mov_TN_reg[ot][1][R_EAX]();
+ gen_op_out[ot]();
+ }
+ break;
+
+ /************************/
+ /* control */
+ case 0xc2: /* ret im */
+ val = lduw(s->pc); /* the RET imm16 count is unsigned */
+ s->pc += 2;
+ gen_pop_T0(s);
+ gen_stack_update(s, val + (2 << s->dflag));
+ if (s->dflag == 0)
+ gen_op_andl_T0_ffff();
+ gen_op_jmp_T0();
+ gen_eob(s);
+ break;
+ case 0xc3: /* ret */
+ gen_pop_T0(s);
+ gen_pop_update(s);
+ if (s->dflag == 0)
+ gen_op_andl_T0_ffff();
+ gen_op_jmp_T0();
+ gen_eob(s);
+ break;
+ case 0xca: /* lret im */
+ val = lduw(s->pc); /* the LRET imm16 count is unsigned */
+ s->pc += 2;
+ do_lret:
+ if (s->pe && !s->vm86) {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(pc_start - s->cs_base);
+ gen_op_lret_protected(s->dflag, val);
+ } else {
+ gen_stack_A0(s);
+ /* pop offset */
+ gen_op_ld_T0_A0[1 + s->dflag + s->mem_index]();
+ if (s->dflag == 0)
+ gen_op_andl_T0_ffff();
+ /* NOTE: keeping EIP updated is not a problem in case of
+ exception */
+ gen_op_jmp_T0();
+ /* pop selector */
+ gen_op_addl_A0_im(2 << s->dflag);
+ gen_op_ld_T0_A0[1 + s->dflag + s->mem_index]();
+ gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[R_CS]));
+ /* add stack offset */
+ gen_stack_update(s, val + (4 << s->dflag));
+ }
+ gen_eob(s);
+ break;
+ case 0xcb: /* lret */
+ val = 0;
+ goto do_lret;
+ case 0xcf: /* iret */
+ if (!s->pe) {
+ /* real mode */
+ gen_op_iret_real(s->dflag);
+ s->cc_op = CC_OP_EFLAGS;
+ } else if (s->vm86 && s->iopl != 3) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(pc_start - s->cs_base);
+ gen_op_iret_protected(s->dflag);
+ s->cc_op = CC_OP_EFLAGS;
+ }
+ gen_eob(s);
+ break;
+ case 0xe8: /* call im */
+ {
+ unsigned int next_eip;
+ ot = dflag ? OT_LONG : OT_WORD;
+ val = insn_get(s, ot);
+ next_eip = s->pc - s->cs_base;
+ val += next_eip;
+ if (s->dflag == 0)
+ val &= 0xffff;
+ gen_op_movl_T0_im(next_eip);
+ gen_push_T0(s);
+ gen_jmp(s, val);
+ }
+ break;
+ case 0x9a: /* lcall im */
+ {
+ unsigned int selector, offset;
+
+ ot = dflag ? OT_LONG : OT_WORD;
+ offset = insn_get(s, ot);
+ selector = insn_get(s, OT_WORD);
+
+ gen_op_movl_T0_im(selector);
+ gen_op_movl_T1_im(offset);
+ }
+ goto do_lcall;
+ case 0xe9: /* jmp */
+ ot = dflag ? OT_LONG : OT_WORD;
+ val = insn_get(s, ot);
+ val += s->pc - s->cs_base;
+ if (s->dflag == 0)
+ val = val & 0xffff;
+ gen_jmp(s, val);
+ break;
+ case 0xea: /* ljmp im */
+ {
+ unsigned int selector, offset;
+
+ ot = dflag ? OT_LONG : OT_WORD;
+ offset = insn_get(s, ot);
+ selector = insn_get(s, OT_WORD);
+
+ gen_op_movl_T0_im(selector);
+ gen_op_movl_T1_im(offset);
+ }
+ goto do_ljmp;
+ case 0xeb: /* jmp Jb */
+ val = (int8_t)insn_get(s, OT_BYTE);
+ val += s->pc - s->cs_base;
+ if (s->dflag == 0)
+ val = val & 0xffff;
+ gen_jmp(s, val);
+ break;
+ case 0x70 ... 0x7f: /* jcc Jb */
+ val = (int8_t)insn_get(s, OT_BYTE);
+ goto do_jcc;
+ case 0x180 ... 0x18f: /* jcc Jv */
+ if (dflag) {
+ val = insn_get(s, OT_LONG);
+ } else {
+ val = (int16_t)insn_get(s, OT_WORD);
+ }
+ do_jcc:
+ next_eip = s->pc - s->cs_base;
+ val += next_eip;
+ if (s->dflag == 0)
+ val &= 0xffff;
+ gen_jcc(s, b, val, next_eip);
+ break;
+
+ case 0x190 ... 0x19f: /* setcc Gv */
+ modrm = ldub(s->pc++);
+ gen_setcc(s, b);
+ gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
+ break;
+ case 0x140 ... 0x14f: /* cmov Gv, Ev */
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ gen_setcc(s, b);
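+ /* T0 now holds the condition result; the cmov op writes T1 to the
+ destination register only when T0 is non zero */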
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T1_A0[ot + s->mem_index]();
+ } else {
+ rm = modrm & 7;
+ gen_op_mov_TN_reg[ot][1][rm]();
+ }
+ gen_op_cmov_reg_T1_T0[ot - OT_WORD][reg]();
+ break;
+
+ /************************/
+ /* flags */
+ case 0x9c: /* pushf */
+ if (s->vm86 && s->iopl != 3) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_movl_T0_eflags();
+ gen_push_T0(s);
+ }
+ break;
+ case 0x9d: /* popf */
+ if (s->vm86 && s->iopl != 3) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_pop_T0(s);
+ if (s->cpl == 0) {
+ if (s->dflag) {
+ gen_op_movl_eflags_T0_cpl0();
+ } else {
+ gen_op_movw_eflags_T0_cpl0();
+ }
+ } else {
+ if (s->dflag) {
+ gen_op_movl_eflags_T0();
+ } else {
+ gen_op_movw_eflags_T0();
+ }
+ }
+ gen_pop_update(s);
+ s->cc_op = CC_OP_EFLAGS;
+ /* abort translation because TF flag may change */
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+ case 0x9e: /* sahf */
+ gen_op_mov_TN_reg[OT_BYTE][0][R_AH]();
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_movb_eflags_T0();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0x9f: /* lahf */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_movl_T0_eflags();
+ gen_op_mov_reg_T0[OT_BYTE][R_AH]();
+ break;
+ case 0xf5: /* cmc */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_cmc();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0xf8: /* clc */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_clc();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0xf9: /* stc */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_stc();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0xfc: /* cld */
+ gen_op_cld();
+ break;
+ case 0xfd: /* std */
+ gen_op_std();
+ break;
+
+ /************************/
+ /* bit operations */
+ case 0x1ba: /* bt/bts/btr/btc Gv, im */
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ op = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_mov_TN_reg[ot][0][rm]();
+ }
+ /* load the immediate bit offset */
+ val = ldub(s->pc++);
+ gen_op_movl_T1_im(val);
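+ /* group 8: /4 = bt, /5 = bts, /6 = btr, /7 = btc (/0../3 are illegal) */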
+ if (op < 4)
+ goto illegal_op;
+ op -= 4;
+ gen_op_btx_T0_T1_cc[ot - OT_WORD][op]();
+ s->cc_op = CC_OP_SARB + ot;
+ if (op != 0) {
+ if (mod != 3)
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ else
+ gen_op_mov_reg_T0[ot][rm]();
+ gen_op_update_bt_cc();
+ }
+ break;
+ case 0x1a3: /* bt Gv, Ev */
+ op = 0;
+ goto do_btx;
+ case 0x1ab: /* bts */
+ op = 1;
+ goto do_btx;
+ case 0x1b3: /* btr */
+ op = 2;
+ goto do_btx;
+ case 0x1bb: /* btc */
+ op = 3;
+ do_btx:
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ gen_op_mov_TN_reg[OT_LONG][1][reg]();
+ if (mod != 3) {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ /* specific case: we need to add a displacement */
+ if (ot == OT_WORD)
+ gen_op_add_bitw_A0_T1();
+ else
+ gen_op_add_bitl_A0_T1();
+ gen_op_ld_T0_A0[ot + s->mem_index]();
+ } else {
+ gen_op_mov_TN_reg[ot][0][rm]();
+ }
+ gen_op_btx_T0_T1_cc[ot - OT_WORD][op]();
+ s->cc_op = CC_OP_SARB + ot;
+ if (op != 0) {
+ if (mod != 3)
+ gen_op_st_T0_A0[ot + s->mem_index]();
+ else
+ gen_op_mov_reg_T0[ot][rm]();
+ gen_op_update_bt_cc();
+ }
+ break;
+ case 0x1bc: /* bsf */
+ case 0x1bd: /* bsr */
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_op_bsx_T0_cc[ot - OT_WORD][b & 1]();
+ /* NOTE: we always write back the result. Intel doc says it is
+ undefined if T0 == 0 */
+ gen_op_mov_reg_T0[ot][reg]();
+ s->cc_op = CC_OP_LOGICB + ot;
+ break;
+ /************************/
+ /* bcd */
+ case 0x27: /* daa */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_daa();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0x2f: /* das */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_das();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0x37: /* aaa */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_aaa();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0x3f: /* aas */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_aas();
+ s->cc_op = CC_OP_EFLAGS;
+ break;
+ case 0xd4: /* aam */
+ val = ldub(s->pc++);
+ gen_op_aam(val);
+ s->cc_op = CC_OP_LOGICB;
+ break;
+ case 0xd5: /* aad */
+ val = ldub(s->pc++);
+ gen_op_aad(val);
+ s->cc_op = CC_OP_LOGICB;
+ break;
+ /************************/
+ /* misc */
+ case 0x90: /* nop */
+ break;
+ case 0x9b: /* fwait */
+ break;
+ case 0xcc: /* int3 */
+ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
+ break;
+ case 0xcd: /* int N */
+ val = ldub(s->pc++);
+ /* XXX: add error code for vm86 GPF */
+ if (!s->vm86)
+ gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
+ else
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ case 0xce: /* into */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_into(s->pc - s->cs_base);
+ break;
+ case 0xf1: /* icebp (undocumented, exits to external debugger) */
+ gen_debug(s, pc_start - s->cs_base);
+ break;
+ case 0xfa: /* cli */
+ if (!s->vm86) {
+ if (s->cpl <= s->iopl) {
+ gen_op_cli();
+ } else {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ }
+ } else {
+ if (s->iopl == 3) {
+ gen_op_cli();
+ } else {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ }
+ }
+ break;
+ case 0xfb: /* sti */
+ if (!s->vm86) {
+ if (s->cpl <= s->iopl) {
+ gen_sti:
+ gen_op_sti();
+ /* interrupts are recognized only after the instruction following sti */
+ gen_op_set_inhibit_irq();
+ /* give a chance to handle pending irqs */
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ }
+ } else {
+ if (s->iopl == 3) {
+ goto gen_sti;
+ } else {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ }
+ }
+ break;
+ case 0x62: /* bound */
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ /* load the index register into T0 so the bound op can test it */
+ gen_op_mov_TN_reg[ot][0][reg]();
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ if (ot == OT_WORD)
+ gen_op_boundw(pc_start - s->cs_base);
+ else
+ gen_op_boundl(pc_start - s->cs_base);
+ break;
+ case 0x1c8 ... 0x1cf: /* bswap reg */
+ reg = b & 7;
+ gen_op_mov_TN_reg[OT_LONG][0][reg]();
+ gen_op_bswapl_T0();
+ gen_op_mov_reg_T0[OT_LONG][reg]();
+ break;
+ case 0xd6: /* salc */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_salc();
+ break;
+ case 0xe0: /* loopnz */
+ case 0xe1: /* loopz */
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ /* FALL THRU */
+ case 0xe2: /* loop */
+ case 0xe3: /* jecxz */
+ val = (int8_t)insn_get(s, OT_BYTE);
+ next_eip = s->pc - s->cs_base;
+ val += next_eip;
+ if (s->dflag == 0)
+ val &= 0xffff;
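+ /* b & 3 selects loopnz/loopz/loop/jecxz; aflag selects CX or ECX as
+ the counter */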
+ gen_op_loop[s->aflag][b & 3](val, next_eip);
+ gen_eob(s);
+ break;
+ case 0x130: /* wrmsr */
+ case 0x132: /* rdmsr */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if (b & 2)
+ gen_op_rdmsr();
+ else
+ gen_op_wrmsr();
+ }
+ break;
+ case 0x131: /* rdtsc */
+ gen_op_rdtsc();
+ break;
+ case 0x1a2: /* cpuid */
+ gen_op_cpuid();
+ break;
+ case 0xf4: /* hlt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_op_hlt();
+ s->is_jmp = 3;
+ }
+ break;
+ case 0x100:
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* sldt */
+ gen_op_movl_T0_env(offsetof(CPUX86State,ldt.selector));
+ ot = OT_WORD;
+ if (mod == 3)
+ ot += s->dflag;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 2: /* lldt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_op_jmp_im(pc_start - s->cs_base);
+ gen_op_lldt_T0();
+ }
+ break;
+ case 1: /* str */
+ gen_op_movl_T0_env(offsetof(CPUX86State,tr.selector));
+ ot = OT_WORD;
+ if (mod == 3)
+ ot += s->dflag;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 3: /* ltr */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_op_jmp_im(pc_start - s->cs_base);
+ gen_op_ltr_T0();
+ }
+ break;
+ case 4: /* verr */
+ case 5: /* verw */
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x101:
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* sgdt */
+ case 1: /* sidt */
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ if (op == 0)
+ gen_op_movl_T0_env(offsetof(CPUX86State,gdt.limit));
+ else
+ gen_op_movl_T0_env(offsetof(CPUX86State,idt.limit));
+ gen_op_st_T0_A0[OT_WORD + s->mem_index]();
+ gen_op_addl_A0_im(2);
+ if (op == 0)
+ gen_op_movl_T0_env(offsetof(CPUX86State,gdt.base));
+ else
+ gen_op_movl_T0_env(offsetof(CPUX86State,idt.base));
+ if (!s->dflag)
+ gen_op_andl_T0_im(0xffffff);
+ gen_op_st_T0_A0[OT_LONG + s->mem_index]();
+ break;
+ case 2: /* lgdt */
+ case 3: /* lidt */
+ if (mod == 3)
+ goto illegal_op;
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_ld_T1_A0[OT_WORD + s->mem_index]();
+ gen_op_addl_A0_im(2);
+ gen_op_ld_T0_A0[OT_LONG + s->mem_index]();
+ if (!s->dflag)
+ gen_op_andl_T0_im(0xffffff);
+ if (op == 2) {
+ gen_op_movl_env_T0(offsetof(CPUX86State,gdt.base));
+ gen_op_movl_env_T1(offsetof(CPUX86State,gdt.limit));
+ } else {
+ gen_op_movl_env_T0(offsetof(CPUX86State,idt.base));
+ gen_op_movl_env_T1(offsetof(CPUX86State,idt.limit));
+ }
+ }
+ break;
+ case 4: /* smsw */
+ gen_op_movl_T0_env(offsetof(CPUX86State,cr[0]));
+ gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
+ break;
+ case 6: /* lmsw */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_op_lmsw_T0();
+ }
+ break;
+ case 7: /* invlpg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_op_invlpg_A0();
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x102: /* lar */
+ case 0x103: /* lsl */
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ ot = dflag ? OT_LONG : OT_WORD;
+ modrm = ldub(s->pc++);
+ reg = (modrm >> 3) & 7;
+ gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_op_mov_TN_reg[ot][1][reg]();
+ if (s->cc_op != CC_OP_DYNAMIC)
+ gen_op_set_cc_op(s->cc_op);
+ if (b == 0x102)
+ gen_op_lar();
+ else
+ gen_op_lsl();
+ s->cc_op = CC_OP_EFLAGS;
+ gen_op_mov_reg_T1[ot][reg]();
+ break;
+ case 0x118:
+ modrm = ldub(s->pc++);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* prefetchnta */
+ case 1: /* prefetcht0 */
+ case 2: /* prefetcht1 */
+ case 3: /* prefetcht2 */
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ /* nothing more to do */
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 0x120: /* mov reg, crN */
+ case 0x122: /* mov crN, reg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ modrm = ldub(s->pc++);
+ if ((modrm & 0xc0) != 0xc0)
+ goto illegal_op;
+ rm = modrm & 7;
+ reg = (modrm >> 3) & 7;
+ switch(reg) {
+ case 0:
+ case 2:
+ case 3:
+ case 4:
+ if (b & 2) {
+ gen_op_mov_TN_reg[OT_LONG][0][rm]();
+ gen_op_movl_crN_T0(reg);
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_op_movl_T0_env(offsetof(CPUX86State,cr[reg]));
+ gen_op_mov_reg_T0[OT_LONG][rm]();
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ }
+ break;
+ case 0x121: /* mov reg, drN */
+ case 0x123: /* mov drN, reg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ modrm = ldub(s->pc++);
+ if ((modrm & 0xc0) != 0xc0)
+ goto illegal_op;
+ rm = modrm & 7;
+ reg = (modrm >> 3) & 7;
+ /* XXX: do it dynamically with CR4.DE bit */
+ if (reg == 4 || reg == 5)
+ goto illegal_op;
+ if (b & 2) {
+ gen_op_mov_TN_reg[OT_LONG][0][rm]();
+ gen_op_movl_drN_T0(reg);
+ gen_op_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_op_movl_T0_env(offsetof(CPUX86State,dr[reg]));
+ gen_op_mov_reg_T0[OT_LONG][rm]();
+ }
+ }
+ break;
+ case 0x106: /* clts */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_op_clts();
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ /* lock generation */
+ if (s->prefix & PREFIX_LOCK)
+ gen_op_unlock();
+ return s->pc;
+ illegal_op:
+ /* XXX: ensure that no lock was generated */
+ gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+ return s->pc;
+}
+
+#define CC_OSZAPC (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C)
+#define CC_OSZAP (CC_O | CC_S | CC_Z | CC_A | CC_P)
+
+/* flags read by an operation */
+static uint16_t opc_read_flags[NB_OPS] = {
+ [INDEX_op_aas] = CC_A,
+ [INDEX_op_aaa] = CC_A,
+ [INDEX_op_das] = CC_A | CC_C,
+ [INDEX_op_daa] = CC_A | CC_C,
+
+ [INDEX_op_adcb_T0_T1_cc] = CC_C,
+ [INDEX_op_adcw_T0_T1_cc] = CC_C,
+ [INDEX_op_adcl_T0_T1_cc] = CC_C,
+ [INDEX_op_sbbb_T0_T1_cc] = CC_C,
+ [INDEX_op_sbbw_T0_T1_cc] = CC_C,
+ [INDEX_op_sbbl_T0_T1_cc] = CC_C,
+
+ [INDEX_op_adcb_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_adcw_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_adcl_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_sbbb_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_sbbw_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_sbbl_mem_T0_T1_cc] = CC_C,
+
+ /* subtle: due to the incl/decl implementation, C is used */
+ [INDEX_op_update_inc_cc] = CC_C,
+
+ [INDEX_op_into] = CC_O,
+
+ [INDEX_op_jb_subb] = CC_C,
+ [INDEX_op_jb_subw] = CC_C,
+ [INDEX_op_jb_subl] = CC_C,
+
+ [INDEX_op_jz_subb] = CC_Z,
+ [INDEX_op_jz_subw] = CC_Z,
+ [INDEX_op_jz_subl] = CC_Z,
+
+ [INDEX_op_jbe_subb] = CC_Z | CC_C,
+ [INDEX_op_jbe_subw] = CC_Z | CC_C,
+ [INDEX_op_jbe_subl] = CC_Z | CC_C,
+
+ [INDEX_op_js_subb] = CC_S,
+ [INDEX_op_js_subw] = CC_S,
+ [INDEX_op_js_subl] = CC_S,
+
+ [INDEX_op_jl_subb] = CC_O | CC_S,
+ [INDEX_op_jl_subw] = CC_O | CC_S,
+ [INDEX_op_jl_subl] = CC_O | CC_S,
+
+ [INDEX_op_jle_subb] = CC_O | CC_S | CC_Z,
+ [INDEX_op_jle_subw] = CC_O | CC_S | CC_Z,
+ [INDEX_op_jle_subl] = CC_O | CC_S | CC_Z,
+
+ [INDEX_op_loopnzw] = CC_Z,
+ [INDEX_op_loopnzl] = CC_Z,
+ [INDEX_op_loopzw] = CC_Z,
+ [INDEX_op_loopzl] = CC_Z,
+
+ [INDEX_op_seto_T0_cc] = CC_O,
+ [INDEX_op_setb_T0_cc] = CC_C,
+ [INDEX_op_setz_T0_cc] = CC_Z,
+ [INDEX_op_setbe_T0_cc] = CC_Z | CC_C,
+ [INDEX_op_sets_T0_cc] = CC_S,
+ [INDEX_op_setp_T0_cc] = CC_P,
+ [INDEX_op_setl_T0_cc] = CC_O | CC_S,
+ [INDEX_op_setle_T0_cc] = CC_O | CC_S | CC_Z,
+
+ [INDEX_op_setb_T0_subb] = CC_C,
+ [INDEX_op_setb_T0_subw] = CC_C,
+ [INDEX_op_setb_T0_subl] = CC_C,
+
+ [INDEX_op_setz_T0_subb] = CC_Z,
+ [INDEX_op_setz_T0_subw] = CC_Z,
+ [INDEX_op_setz_T0_subl] = CC_Z,
+
+ [INDEX_op_setbe_T0_subb] = CC_Z | CC_C,
+ [INDEX_op_setbe_T0_subw] = CC_Z | CC_C,
+ [INDEX_op_setbe_T0_subl] = CC_Z | CC_C,
+
+ [INDEX_op_sets_T0_subb] = CC_S,
+ [INDEX_op_sets_T0_subw] = CC_S,
+ [INDEX_op_sets_T0_subl] = CC_S,
+
+ [INDEX_op_setl_T0_subb] = CC_O | CC_S,
+ [INDEX_op_setl_T0_subw] = CC_O | CC_S,
+ [INDEX_op_setl_T0_subl] = CC_O | CC_S,
+
+ [INDEX_op_setle_T0_subb] = CC_O | CC_S | CC_Z,
+ [INDEX_op_setle_T0_subw] = CC_O | CC_S | CC_Z,
+ [INDEX_op_setle_T0_subl] = CC_O | CC_S | CC_Z,
+
+ [INDEX_op_movl_T0_eflags] = CC_OSZAPC,
+ [INDEX_op_cmc] = CC_C,
+ [INDEX_op_salc] = CC_C,
+
+ [INDEX_op_rclb_T0_T1_cc] = CC_C,
+ [INDEX_op_rclw_T0_T1_cc] = CC_C,
+ [INDEX_op_rcll_T0_T1_cc] = CC_C,
+ [INDEX_op_rcrb_T0_T1_cc] = CC_C,
+ [INDEX_op_rcrw_T0_T1_cc] = CC_C,
+ [INDEX_op_rcrl_T0_T1_cc] = CC_C,
+
+ [INDEX_op_rclb_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_rclw_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_rcll_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_rcrb_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_rcrw_mem_T0_T1_cc] = CC_C,
+ [INDEX_op_rcrl_mem_T0_T1_cc] = CC_C,
+};
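+
+/* example: the jbe_sub* entries read CC_Z | CC_C because 'below or
+ equal' on x86 means CF=1 or ZF=1, so only those two flags of the
+ preceding subtraction need to be valid at that point. */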
+
+/* flags written by an operation */
+static uint16_t opc_write_flags[NB_OPS] = {
+ [INDEX_op_update2_cc] = CC_OSZAPC,
+ [INDEX_op_update1_cc] = CC_OSZAPC,
+ [INDEX_op_cmpl_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_update_neg_cc] = CC_OSZAPC,
+ /* subtle: due to the incl/decl implementation, C is used */
+ [INDEX_op_update_inc_cc] = CC_OSZAPC,
+ [INDEX_op_testl_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_adcb_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_adcw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_adcl_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sbbb_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sbbw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sbbl_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_adcb_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_adcw_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_adcl_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sbbb_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sbbw_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sbbl_mem_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_mulb_AL_T0] = CC_OSZAPC,
+ [INDEX_op_imulb_AL_T0] = CC_OSZAPC,
+ [INDEX_op_mulw_AX_T0] = CC_OSZAPC,
+ [INDEX_op_imulw_AX_T0] = CC_OSZAPC,
+ [INDEX_op_mull_EAX_T0] = CC_OSZAPC,
+ [INDEX_op_imull_EAX_T0] = CC_OSZAPC,
+ [INDEX_op_imulw_T0_T1] = CC_OSZAPC,
+ [INDEX_op_imull_T0_T1] = CC_OSZAPC,
+
+ /* bcd */
+ [INDEX_op_aam] = CC_OSZAPC,
+ [INDEX_op_aad] = CC_OSZAPC,
+ [INDEX_op_aas] = CC_OSZAPC,
+ [INDEX_op_aaa] = CC_OSZAPC,
+ [INDEX_op_das] = CC_OSZAPC,
+ [INDEX_op_daa] = CC_OSZAPC,
+
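+ /* an 8 bit eflags store (SAHF) only covers bits 0-7, so OF
+ (bit 11) is left untouched */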
+ [INDEX_op_movb_eflags_T0] = CC_S | CC_Z | CC_A | CC_P | CC_C,
+ [INDEX_op_movw_eflags_T0] = CC_OSZAPC,
+ [INDEX_op_movl_eflags_T0] = CC_OSZAPC,
+ [INDEX_op_clc] = CC_C,
+ [INDEX_op_stc] = CC_C,
+ [INDEX_op_cmc] = CC_C,
+
+ [INDEX_op_rolb_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rolw_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_roll_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rorb_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rorw_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rorl_T0_T1_cc] = CC_O | CC_C,
+
+ [INDEX_op_rclb_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rclw_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcll_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcrb_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcrw_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcrl_T0_T1_cc] = CC_O | CC_C,
+
+ [INDEX_op_shlb_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shlw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shll_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_shrb_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shrw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shrl_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_sarb_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sarw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sarl_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_shldw_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shldl_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shldw_T0_T1_im_cc] = CC_OSZAPC,
+ [INDEX_op_shldl_T0_T1_im_cc] = CC_OSZAPC,
+
+ [INDEX_op_shrdw_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shrdl_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shrdw_T0_T1_im_cc] = CC_OSZAPC,
+ [INDEX_op_shrdl_T0_T1_im_cc] = CC_OSZAPC,
+
+ [INDEX_op_rolb_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rolw_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_roll_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rorb_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rorw_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rorl_mem_T0_T1_cc] = CC_O | CC_C,
+
+ [INDEX_op_rclb_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rclw_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcll_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcrb_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcrw_mem_T0_T1_cc] = CC_O | CC_C,
+ [INDEX_op_rcrl_mem_T0_T1_cc] = CC_O | CC_C,
+
+ [INDEX_op_shlb_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shlw_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shll_mem_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_shrb_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shrw_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_shrl_mem_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_sarb_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sarw_mem_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_sarl_mem_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_shldw_mem_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shldl_mem_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shldw_mem_T0_T1_im_cc] = CC_OSZAPC,
+ [INDEX_op_shldl_mem_T0_T1_im_cc] = CC_OSZAPC,
+
+ [INDEX_op_shrdw_mem_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shrdl_mem_T0_T1_ECX_cc] = CC_OSZAPC,
+ [INDEX_op_shrdw_mem_T0_T1_im_cc] = CC_OSZAPC,
+ [INDEX_op_shrdl_mem_T0_T1_im_cc] = CC_OSZAPC,
+
+ [INDEX_op_btw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_btl_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_btsw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_btsl_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_btrw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_btrl_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_btcw_T0_T1_cc] = CC_OSZAPC,
+ [INDEX_op_btcl_T0_T1_cc] = CC_OSZAPC,
+
+ [INDEX_op_bsfw_T0_cc] = CC_OSZAPC,
+ [INDEX_op_bsfl_T0_cc] = CC_OSZAPC,
+ [INDEX_op_bsrw_T0_cc] = CC_OSZAPC,
+ [INDEX_op_bsrl_T0_cc] = CC_OSZAPC,
+
+ [INDEX_op_cmpxchgb_T0_T1_EAX_cc] = CC_OSZAPC,
+ [INDEX_op_cmpxchgw_T0_T1_EAX_cc] = CC_OSZAPC,
+ [INDEX_op_cmpxchgl_T0_T1_EAX_cc] = CC_OSZAPC,
+
+ [INDEX_op_cmpxchgb_mem_T0_T1_EAX_cc] = CC_OSZAPC,
+ [INDEX_op_cmpxchgw_mem_T0_T1_EAX_cc] = CC_OSZAPC,
+ [INDEX_op_cmpxchgl_mem_T0_T1_EAX_cc] = CC_OSZAPC,
+
+ [INDEX_op_cmpxchg8b] = CC_Z,
+ [INDEX_op_lar] = CC_Z,
+ [INDEX_op_lsl] = CC_Z,
+ [INDEX_op_fcomi_ST0_FT0] = CC_Z | CC_P | CC_C,
+ [INDEX_op_fucomi_ST0_FT0] = CC_Z | CC_P | CC_C,
+};
+
+/* simpler form of an operation if no flags need to be generated */
+static uint16_t opc_simpler[NB_OPS] = {
+ [INDEX_op_update2_cc] = INDEX_op_nop,
+ [INDEX_op_update1_cc] = INDEX_op_nop,
+ [INDEX_op_update_neg_cc] = INDEX_op_nop,
+#if 0
+ /* broken: CC_OP logic must be rewritten */
+ [INDEX_op_update_inc_cc] = INDEX_op_nop,
+#endif
+ [INDEX_op_rolb_T0_T1_cc] = INDEX_op_rolb_T0_T1,
+ [INDEX_op_rolw_T0_T1_cc] = INDEX_op_rolw_T0_T1,
+ [INDEX_op_roll_T0_T1_cc] = INDEX_op_roll_T0_T1,
+
+ [INDEX_op_rorb_T0_T1_cc] = INDEX_op_rorb_T0_T1,
+ [INDEX_op_rorw_T0_T1_cc] = INDEX_op_rorw_T0_T1,
+ [INDEX_op_rorl_T0_T1_cc] = INDEX_op_rorl_T0_T1,
+
+ [INDEX_op_rolb_mem_T0_T1_cc] = INDEX_op_rolb_mem_T0_T1,
+ [INDEX_op_rolw_mem_T0_T1_cc] = INDEX_op_rolw_mem_T0_T1,
+ [INDEX_op_roll_mem_T0_T1_cc] = INDEX_op_roll_mem_T0_T1,
+
+ [INDEX_op_rorb_mem_T0_T1_cc] = INDEX_op_rorb_mem_T0_T1,
+ [INDEX_op_rorw_mem_T0_T1_cc] = INDEX_op_rorw_mem_T0_T1,
+ [INDEX_op_rorl_mem_T0_T1_cc] = INDEX_op_rorl_mem_T0_T1,
+
+ [INDEX_op_shlb_T0_T1_cc] = INDEX_op_shlb_T0_T1,
+ [INDEX_op_shlw_T0_T1_cc] = INDEX_op_shlw_T0_T1,
+ [INDEX_op_shll_T0_T1_cc] = INDEX_op_shll_T0_T1,
+
+ [INDEX_op_shrb_T0_T1_cc] = INDEX_op_shrb_T0_T1,
+ [INDEX_op_shrw_T0_T1_cc] = INDEX_op_shrw_T0_T1,
+ [INDEX_op_shrl_T0_T1_cc] = INDEX_op_shrl_T0_T1,
+
+ [INDEX_op_sarb_T0_T1_cc] = INDEX_op_sarb_T0_T1,
+ [INDEX_op_sarw_T0_T1_cc] = INDEX_op_sarw_T0_T1,
+ [INDEX_op_sarl_T0_T1_cc] = INDEX_op_sarl_T0_T1,
+};
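+
+/* ops absent from this table have no flag-free variant;
+ optimize_flags_init() below maps them to themselves. */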
+
+void optimize_flags_init(void)
+{
+ int i;
+ /* by default, an op is its own simplest form */
+ for(i = 0; i < NB_OPS; i++) {
+ if (opc_simpler[i] == 0)
+ opc_simpler[i] = i;
+ }
+}
+
+/* CPU flags computation optimization: we move backward through the
+ generated code to see which flags are actually needed. An operation
+ is replaced by a simpler one when its flag outputs are never used. */
+static void optimize_flags(uint16_t *opc_buf, int opc_buf_len)
+{
+ uint16_t *opc_ptr;
+ int live_flags, write_flags, op;
+
+ opc_ptr = opc_buf + opc_buf_len;
+ /* live_flags contains the flags needed by the following
+ instructions in the code. At the end of the block, we consider
+ that all the flags are live. */
+ live_flags = CC_OSZAPC;
+ while (opc_ptr > opc_buf) {
+ op = *--opc_ptr;
+ /* if none of the flags written by the instruction are ever used,
+ then we can substitute its simpler form (possibly the op itself) */
+ write_flags = opc_write_flags[op];
+ if ((live_flags & write_flags) == 0) {
+ *opc_ptr = opc_simpler[op];
+ }
+ /* compute the live flags before the instruction */
+ live_flags &= ~write_flags;
+ live_flags |= opc_read_flags[op];
+ }
+}
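+
+/* A worked example (a sketch, not actual generated output): for the op
+ sequence
+ update1_cc ; cmpl_T0_T1_cc ; movl_T0_eflags
+ we walk backward from live_flags = CC_OSZAPC. movl_T0_eflags reads
+ OSZAPC and writes nothing, so it is kept and live_flags stays OSZAPC;
+ cmpl_T0_T1_cc writes OSZAPC, all of which are live, so it is kept and
+ live_flags drops to 0; update1_cc then writes OSZAPC while no flag is
+ live, so it is rewritten to INDEX_op_nop via opc_simpler[]. */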
+
+/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
+ basic block 'tb'. If search_pc is TRUE, also generate PC
+ information for each intermediate instruction. */
+static inline int gen_intermediate_code_internal(CPUState *env,
+ TranslationBlock *tb,
+ int search_pc)
+{
+ DisasContext dc1, *dc = &dc1;
+ uint8_t *pc_ptr;
+ uint16_t *gen_opc_end;
+ int flags, j, lj;
+ uint8_t *pc_start;
+ uint8_t *cs_base;
+
+ /* generate intermediate code */
+ pc_start = (uint8_t *)tb->pc;
+ cs_base = (uint8_t *)tb->cs_base;
+ flags = tb->flags;
+
+ dc->pe = env->cr[0] & CR0_PE_MASK;
+ dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+ dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+ dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+ dc->f_st = 0;
+ dc->vm86 = (flags >> VM_SHIFT) & 1;
+ dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+ dc->iopl = (flags >> IOPL_SHIFT) & 3;
+ dc->tf = (flags >> TF_SHIFT) & 1;
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->cs_base = cs_base;
+ dc->tb = tb;
+ dc->popl_esp_hack = 0;
+ /* select memory access functions */
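+ /* mem_index offsets into the per-size memory op tables: 0 selects
+ the raw host accesses, 3 the kernel softmmu variants and 6 the
+ user softmmu variants */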
+ dc->mem_index = 0;
+ if (flags & HF_SOFTMMU_MASK) {
+ if (dc->cpl == 3)
+ dc->mem_index = 6;
+ else
+ dc->mem_index = 3;
+ }
+ dc->jmp_opt = !(dc->tf || env->singlestep_enabled
+#ifndef CONFIG_SOFT_MMU
+ || (flags & HF_SOFTMMU_MASK)
+#endif
+ );
+ gen_opc_ptr = gen_opc_buf;
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ gen_opparam_ptr = gen_opparam_buf;
+
+ dc->is_jmp = DISAS_NEXT;
+ pc_ptr = pc_start;
+ lj = -1;
+
+ /* if IRQs were inhibited for the next instruction, we clear the
+ inhibit flag here, as it is simpler (otherwise jumps would have
+ to be handled as a special case) */
+ if (flags & HF_INHIBIT_IRQ_MASK) {
+ gen_op_reset_inhibit_irq();
+ }
+ for(;;) {
+ if (env->nb_breakpoints > 0) {
+ for(j = 0; j < env->nb_breakpoints; j++) {
+ if (env->breakpoints[j] == (unsigned long)pc_ptr) {
+ gen_debug(dc, pc_ptr - dc->cs_base);
+ break;
+ }
+ }
+ }
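+ /* when search_pc is set, record the guest PC and cc_op state for
+ each generated op index; gen_opc_instr_start marks the op
+ indexes where a guest instruction starts, and the slots skipped
+ since the previous instruction are cleared */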
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ if (lj < j) {
+ lj++;
+ while (lj < j)
+ gen_opc_instr_start[lj++] = 0;
+ }
+ gen_opc_pc[lj] = (uint32_t)pc_ptr;
+ gen_opc_cc_op[lj] = dc->cc_op;
+ gen_opc_instr_start[lj] = 1;
+ }
+ pc_ptr = disas_insn(dc, pc_ptr);
+ /* stop translation if indicated */
+ if (dc->is_jmp)
+ break;
+ /* in single step mode, we generate only one instruction and
+ then raise an exception */
+ if (dc->tf) {
+ gen_op_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
+ /* if the translation gets too long, stop generation as well */
+ if (gen_opc_ptr >= gen_opc_end ||
+ (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) {
+ gen_op_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
+ }
+ *gen_opc_ptr = INDEX_op_end;
+ /* make sure the last values are filled in */
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ lj++;
+ while (lj <= j)
+ gen_opc_instr_start[lj++] = 0;
+ }
+
+#ifdef DEBUG_DISAS
+ if (loglevel) {
+ fprintf(logfile, "----------------\n");
+ fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
+ disas(logfile, pc_start, pc_ptr - pc_start, 0, !dc->code32);
+ fprintf(logfile, "\n");
+
+ fprintf(logfile, "OP:\n");
+ dump_ops(gen_opc_buf, gen_opparam_buf);
+ fprintf(logfile, "\n");
+ }
+#endif
+
+ /* optimize flag computations */
+ optimize_flags(gen_opc_buf, gen_opc_ptr - gen_opc_buf);
+
+#ifdef DEBUG_DISAS
+ if (loglevel) {
+ fprintf(logfile, "AFTER FLAGS OPT:\n");
+ dump_ops(gen_opc_buf, gen_opparam_buf);
+ fprintf(logfile, "\n");
+ }
+#endif
+ if (!search_pc)
+ tb->size = pc_ptr - pc_start;
+ return 0;
+}
+
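+/* normal translation entry point: no PC mapping is recorded */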
+int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(env, tb, 0);
+}
+
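+/* variant used when retranslating at exception time: gen_opc_pc and
+ the related arrays are filled so the guest PC of a faulting op can
+ be recovered */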
+int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+{
+ return gen_intermediate_code_internal(env, tb, 1);
+}
+