--- /dev/null
+/*
+ * amd64-codegen.h: Macros for generating amd64 code
+ *
+ * Authors:
+ * Paolo Molaro (lupus@ximian.com)
+ * Intel Corporation (ORP Project)
+ * Sergey Chaban (serge@wildwestsoftware.com)
+ * Dietmar Maurer (dietmar@ximian.com)
+ * Patrik Torstensson
+ * Zalman Stern
+ *
+ * Copyright (C) 2000 Intel Corporation. All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef AMD64_H
+#define AMD64_H
+
+//#include <glib.h>
+
+/* General-purpose register numbers. Only the low 3 bits are encoded in
+ * ModRM/SIB/opcode fields; registers 8-15 additionally require the matching
+ * REX prefix bit (see AMD64_REX_Bits). AMD64_RIP is a pseudo-register used
+ * to request RIP-relative addressing when emitting memory operands. */
+typedef enum {
+ AMD64_RAX = 0,
+ AMD64_RCX = 1,
+ AMD64_RDX = 2,
+ AMD64_RBX = 3,
+ AMD64_RSP = 4,
+ AMD64_RBP = 5,
+ AMD64_RSI = 6,
+ AMD64_RDI = 7,
+ AMD64_R8 = 8,
+ AMD64_R9 = 9,
+ AMD64_R10 = 10,
+ AMD64_R11 = 11,
+ AMD64_R12 = 12,
+ AMD64_R13 = 13,
+ AMD64_R14 = 14,
+ AMD64_R15 = 15,
+ AMD64_RIP = 16,
+ AMD64_NREG
+} AMD64_Reg_No;
+
+/* SSE/XMM register numbers; like the GP registers, XMM8-XMM15 need the
+ * appropriate REX prefix bit when encoded. */
+typedef enum {
+ AMD64_XMM0 = 0,
+ AMD64_XMM1 = 1,
+ AMD64_XMM2 = 2,
+ AMD64_XMM3 = 3,
+ AMD64_XMM4 = 4,
+ AMD64_XMM5 = 5,
+ AMD64_XMM6 = 6,
+ AMD64_XMM7 = 7,
+ AMD64_XMM8 = 8,
+ AMD64_XMM9 = 9,
+ AMD64_XMM10 = 10,
+ AMD64_XMM11 = 11,
+ AMD64_XMM12 = 12,
+ AMD64_XMM13 = 13,
+ AMD64_XMM14 = 14,
+ AMD64_XMM15 = 15,
+ AMD64_XMM_NREG = 16
+} AMD64_XMM_Reg_No;
+
+typedef enum
+{
+ AMD64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
+ AMD64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
+ AMD64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
+ AMD64_REX_W = 8 /* Operation is 64-bits instead of 32 (default) or 16 (with 0x66 prefix) */
+} AMD64_REX_Bits;
+
+/* NOTE(review): exactly one of __default_codegen__ / __native_client_codegen__
+ * is expected to be defined by the build; if neither is, amd64_codegen_pre/post
+ * are left undefined and later macros will fail to expand. */
+#if defined(__default_codegen__)
+
+/* Default codegen: the per-instruction pre/post hooks are no-ops. */
+#define amd64_codegen_pre(inst)
+#define amd64_codegen_post(inst)
+
+#elif defined(__native_client_codegen__)
+
+/* NaCl codegen: bracket every instruction so the sandboxer can inspect
+ * and, if necessary, relocate what was just emitted. */
+#define amd64_codegen_pre(inst) guint8* _codegen_start = (inst); amd64_nacl_instruction_pre();
+#define amd64_codegen_post(inst) (amd64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
+
+/* Because of rex prefixes, etc, call sequences are not constant size. */
+/* These pre- and post-sequence hooks remedy this by aligning the call */
+/* sequence after we emit it, since we will know the exact size then. */
+#define amd64_call_sequence_pre(inst) guint8* _code_start = (inst);
+#define amd64_call_sequence_post(inst) \
+ (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+
+/* Native client can load/store using one of the following registers */
+/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
+/* its upper 32 bits cleared and reference memory using r15 as the base. */
+#define amd64_is_valid_nacl_base(reg) \
+ ((reg) == AMD64_RIP || (reg) == AMD64_R15 || \
+ (reg) == AMD64_RBP || (reg) == AMD64_RSP)
+
+#endif /*__native_client_codegen__*/
+
+/* Integer argument registers: the Win64 ABI and the SysV AMD64 ABI differ. */
+#ifdef TARGET_WIN32
+#define AMD64_ARG_REG1 AMD64_RCX
+#define AMD64_ARG_REG2 AMD64_RDX
+#define AMD64_ARG_REG3 AMD64_R8
+#define AMD64_ARG_REG4 AMD64_R9
+#else
+#define AMD64_ARG_REG1 AMD64_RDI
+#define AMD64_ARG_REG2 AMD64_RSI
+#define AMD64_ARG_REG3 AMD64_RDX
+#define AMD64_ARG_REG4 AMD64_RCX
+#endif
+
+/* Register-set bitmasks per ABI. NOTE(review): despite the name,
+ * AMD64_CALLEE_REGS lists the caller-saved (volatile/scratch) registers,
+ * i.e. those a callee may clobber — confirm against users of the mask. */
+#ifdef TARGET_WIN32
+#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
+#define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg)))
+
+#define AMD64_ARGUMENT_REGS ((1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9))
+#define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg)))
+
+#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP))
+#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
+#elif defined(__native_client_codegen__)
+/* AMD64 Native Client code may not write R15 */
+#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_RSI) | (1<<AMD64_RDI) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
+#define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg)))
+
+#define AMD64_ARGUMENT_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9))
+#define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg)))
+
+#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_RBP))
+#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
+#else
+#define AMD64_CALLEE_REGS ((1<<AMD64_RAX) | (1<<AMD64_RCX) | (1<<AMD64_RDX) | (1<<AMD64_RSI) | (1<<AMD64_RDI) | (1<<AMD64_R8) | (1<<AMD64_R9) | (1<<AMD64_R10))
+#define AMD64_IS_CALLEE_REG(reg) (AMD64_CALLEE_REGS & (1 << (reg)))
+
+#define AMD64_ARGUMENT_REGS ((1<<AMD64_RDI) | (1<<AMD64_RSI) | (1<<AMD64_RDX) | (1<<AMD64_RCX) | (1<<AMD64_R8) | (1<<AMD64_R9))
+#define AMD64_IS_ARGUMENT_REG(reg) (AMD64_ARGUMENT_REGS & (1 << (reg)))
+
+#define AMD64_CALLEE_SAVED_REGS ((1<<AMD64_RBX) | (1<<AMD64_R12) | (1<<AMD64_R13) | (1<<AMD64_R14) | (1<<AMD64_R15) | (1<<AMD64_RBP))
+#define AMD64_IS_CALLEE_SAVED_REG(reg) (AMD64_CALLEE_SAVED_REGS & (1 << (reg)))
+#endif
+
+#define AMD64_REX(bits) ((unsigned char)(0x40 | (bits)))
+/* Emit a REX prefix if any extended register or 64-bit width is involved.
+ * A REX is also forced for 8-bit operands (width == 1) so that the low bytes
+ * of RSP/RBP/RSI/RDI are encodable (instead of AH/CH/DH/BH). */
+#if defined(__default_codegen__)
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+ { \
+ unsigned char _amd64_rex_bits = \
+ (((width) > 4) ? AMD64_REX_W : 0) | \
+ (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+ (((reg_index) > 7) ? AMD64_REX_X : 0) | \
+ (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
+ if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
+ } while (0)
+#elif defined(__native_client_codegen__)
+#define amd64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+ { \
+ unsigned char _amd64_rex_bits = \
+ (((width) > 4) ? AMD64_REX_W : 0) | \
+ (((reg_modrm) > 7) ? AMD64_REX_R : 0) | \
+ (((reg_index) > 7) ? AMD64_REX_X : 0) | \
+ (((reg_rm_base_opcode) > 7) ? AMD64_REX_B : 0); \
+ amd64_nacl_tag_rex((inst)); \
+ if ((_amd64_rex_bits != 0) || (((width) == 1))) *(inst)++ = AMD64_REX(_amd64_rex_bits); \
+ } while (0)
+#endif
+
+/* Scratch union used to emit a 64-bit immediate one byte at a time
+ * (little-endian, via the b[] view). */
+typedef union {
+ guint64 val;
+ unsigned char b [8];
+} amd64_imm_buf;
+
+#include "x86-codegen.h"
+
+/* In 64 bit mode, all registers have a low byte subregister */
+#undef X86_IS_BYTE_REG
+#define X86_IS_BYTE_REG(reg) 1
+
+/* Field extractors for decoding ModRM, REX and SIB bytes. The rex
+ * extractors return the extension bit already shifted into bit 3, ready
+ * to be OR-ed with the 3-bit ModRM/SIB field. */
+#define amd64_modrm_mod(modrm) ((modrm) >> 6)
+#define amd64_modrm_reg(modrm) (((modrm) >> 3) & 0x7)
+#define amd64_modrm_rm(modrm) ((modrm) & 0x7)
+
+#define amd64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3)
+#define amd64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3)
+#define amd64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3)
+
+#define amd64_sib_scale(sib) ((sib) >> 6)
+#define amd64_sib_index(sib) (((sib) >> 3) & 0x7)
+#define amd64_sib_base(sib) ((sib) & 0x7)
+
+/* True iff val fits in a sign-extended 32-bit immediate.
+ * The argument is fully parenthesized: a cast binds tighter than the
+ * comparison, so an unparenthesized ternary/comma/additive argument would
+ * otherwise be cast-and-compared incorrectly. */
+#define amd64_is_imm32(val) ((gint64)(val) >= -((gint64)1<<31) && (gint64)(val) <= (((gint64)1<<31)-1))
+
+/* Emit a 64-bit immediate, least significant byte first. */
+#define x86_imm_emit64(inst,imm) \
+ do { \
+ amd64_imm_buf imb; \
+ imb.val = (guint64) (imm); \
+ *(inst)++ = imb.b [0]; \
+ *(inst)++ = imb.b [1]; \
+ *(inst)++ = imb.b [2]; \
+ *(inst)++ = imb.b [3]; \
+ *(inst)++ = imb.b [4]; \
+ *(inst)++ = imb.b [5]; \
+ *(inst)++ = imb.b [6]; \
+ *(inst)++ = imb.b [7]; \
+ } while (0)
+
+/* Emit the ModRM(/SIB/disp) bytes for [basereg + disp]. AMD64_RIP selects
+ * RIP-relative addressing (mod=00, r/m=101, 32-bit displacement). */
+#define amd64_membase_emit(inst,reg,basereg,disp) do { \
+ if ((basereg) == AMD64_RIP) { \
+ x86_address_byte ((inst), 0, (reg)&0x7, 5); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ else \
+ x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \
+} while (0)
+
+/* ALU op (opc = X86_ADD, X86_SUB, ...) with an immediate: prefers the
+ * sign-extended imm8 form (0x83), then the short RAX form (opc<<3 + 5),
+ * and finally the generic 0x81 /opc imm32 form. */
+#define amd64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ amd64_emit_rex(inst, size, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((reg) == AMD64_RAX) { \
+ amd64_emit_rex(inst, size, 0, 0, 0); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } else { \
+ amd64_emit_rex(inst, size, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/* ALU op between two registers (opcode opc<<3 + 3: reg <- reg op r/m). */
+#define amd64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \
+ do { \
+ amd64_emit_rex(inst, size, (dreg), 0, (reg)); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#if defined(__default_codegen__)
+
+#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+ amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size))
+
+#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+ amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size))
+
+#elif defined(__native_client_codegen__)
+/* NaCl modules may not directly update RSP or RBP other than direct copies */
+/* between them. Instead the lower 4 bytes are updated and then added to R15 */
+#define amd64_is_nacl_stack_reg(reg) (((reg) == AMD64_RSP) || ((reg) == AMD64_RBP))
+
+#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+ do{ \
+ amd64_codegen_pre(inst); \
+ if (amd64_is_nacl_stack_reg(reg)) { \
+ if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \
+ g_assert_not_reached(); \
+ amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \
+ /* Use LEA instead of ADD to preserve flags */ \
+ amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+ } else { \
+ amd64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \
+ } \
+ amd64_codegen_post(inst); \
+ } while(0)
+
+#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if (amd64_is_nacl_stack_reg((dreg)) && ((reg) != AMD64_R15)) { \
+ if (((opc) != X86_ADD && (opc) != X86_SUB)) \
+ g_assert_not_reached(); \
+ amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \
+ /* Use LEA instead of ADD to preserve flags */ \
+ amd64_lea_memindex_size((inst), (dreg), (dreg), 0, AMD64_R15, 0, 8); \
+ } else { \
+ amd64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \
+ } \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#endif /*__native_client_codegen__*/
+
+/* Convenience 64-bit-wide forms of the ALU macros. */
+#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size((inst),(opc),(reg),(imm),8)
+
+#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
+
+/* ALU op: reg <- reg op [basereg + disp]. */
+#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex ((inst),(size),(reg),0,(basereg)); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ amd64_membase_emit (inst, reg, basereg, disp); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/* Store reg into [regp] (mov opcode 0x88/0x89; 0x66 prefix for 16-bit). */
+#define amd64_mov_regp_reg(inst,regp,reg,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ amd64_emit_rex(inst, (size), (reg), 0, (regp)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_regp_emit ((inst), (reg), (regp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Store reg into [basereg + disp]. */
+#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Store reg to an absolute 32-bit address (encoded via SIB, no base/index). */
+#define amd64_mov_mem_reg(inst,mem,reg,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ amd64_emit_rex(inst, (size), (reg), 0, 0); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_address_byte ((inst), 0, (reg), 4); \
+ x86_address_byte ((inst), 0, 4, 5); \
+ x86_imm_emit32 ((inst), (mem)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Register-to-register move (load form, 0x8a/0x8b: dreg <- reg). */
+#define amd64_mov_reg_reg(inst,dreg,reg,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ amd64_emit_rex(inst, (size), (dreg), 0, (reg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Load reg from an absolute 32-bit address (SIB encoding, no base/index). */
+#define amd64_mov_reg_mem_body(inst,reg,mem,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ amd64_emit_rex(inst, (size), (reg), 0, 0); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_address_byte ((inst), 0, (reg), 4); \
+ x86_address_byte ((inst), 0, 4, 5); \
+ x86_imm_emit32 ((inst), (mem)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#if defined(__default_codegen__)
+#define amd64_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ amd64_mov_reg_mem_body((inst),(reg),(mem),(size)); \
+ } while (0)
+#elif defined(__native_client_codegen__)
+/* We have to re-base memory reads because memory isn't zero based. */
+#define amd64_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ amd64_mov_reg_membase((inst),(reg),AMD64_R15,(mem),(size)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+/* Load reg from [basereg + disp]. */
+#define amd64_mov_reg_membase_body(inst,reg,basereg,disp,size) \
+ do { \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/* Load reg from [basereg + indexreg<<shift + disp]; defers to the x86 macro
+ * with size 8 mapped to 4 since the REX.W emitted above supplies the width. */
+#define amd64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \
+ x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \
+ } while (0)
+
+#if defined(__default_codegen__)
+
+#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+ amd64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size))
+#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+ } while (0)
+
+#elif defined(__native_client_codegen__)
+
+#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if (amd64_is_nacl_stack_reg((reg))) { \
+ /* Clear upper 32 bits with mov of size 4 */ \
+ amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \
+ /* Add %r15 using LEA to preserve flags */ \
+ amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+ } else { \
+ amd64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \
+ } \
+ amd64_codegen_post(inst); \
+ } while(0)
+
+#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if (amd64_is_nacl_stack_reg((reg))) { \
+ /* Clear upper 32 bits with mov of size 4 */ \
+ amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \
+ /* Add %r15 */ \
+ amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+ } else { \
+ amd64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+ } \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#endif /*__native_client_codegen__*/
+
+/* Zero-extending load from [basereg + disp] (movzx for 1/2 bytes).
+ * NOTE(review): sizes 4/8 fall back to a plain mov (0x8b) — for size 4 the
+ * 32-bit mov zero-extends implicitly; size 8 is just a full-width load. */
+#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \
+ case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \
+ case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* movsxd: sign-extend a 32-bit memory operand into a 64-bit register. */
+#define amd64_movsxd_reg_mem(inst,reg,mem) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst,8,(reg),0,0); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_mem_emit ((inst), ((reg)&0x7), (mem)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst,8,(reg),0,(basereg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movsxd_reg_reg(inst,dreg,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst,8,(dreg),0,(reg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of
+ * 32-bit immediate. Pepper with casts to avoid warnings.
+ */
+#define amd64_mov_reg_imm_size(inst,reg,imm,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst, (size), 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \
+ if ((size) == 8) \
+ x86_imm_emit64 ((inst), (guint64)(imm)); \
+ else \
+ x86_imm_emit32 ((inst), (int)(guint64)(imm)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Picks 4- or 8-byte encoding depending on whether imm survives a
+ * round-trip through a 32-bit int (i.e. fits sign-extended). */
+#define amd64_mov_reg_imm(inst,reg,imm) \
+ do { \
+ int _amd64_width_temp = ((guint64)(imm) == (guint64)(int)(guint64)(imm)); \
+ amd64_codegen_pre(inst); \
+ amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Emit a fixed-size mov reg, imm64 whose immediate is patched later. */
+#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8)
+
+#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg))
+
+/* Store an immediate into [basereg + disp] (0xc6/0xc7 by operand size). */
+#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \
+ if ((size) == 1) { \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+
+/* lea reg, [basereg + disp] (always 64-bit). */
+#define amd64_lea_membase_body(inst,reg,basereg,disp) \
+ do { \
+ amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x8d; \
+ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#if defined(__default_codegen__)
+#define amd64_lea_membase(inst,reg,basereg,disp) \
+ amd64_lea_membase_body((inst), (reg), (basereg), (disp))
+#elif defined(__native_client_codegen__)
+/* NaCl modules may not write directly into RSP/RBP. Instead, use a */
+/* 32-bit LEA and add R15 to the effective address */
+#define amd64_lea_membase(inst,reg,basereg,disp) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if (amd64_is_nacl_stack_reg(reg)) { \
+ /* 32-bit LEA */ \
+ amd64_emit_rex((inst), 4, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x8d; \
+ amd64_membase_emit((inst), (reg), (basereg), (disp)); \
+ /* Use a 64-bit LEA instead of an ADD to preserve flags */ \
+ amd64_lea_memindex_size((inst), (reg), (reg), 0, AMD64_R15, 0, 8); \
+ } else { \
+ amd64_lea_membase_body((inst), (reg), (basereg), (disp)); \
+ } \
+ amd64_codegen_post(inst); \
+ } while (0)
+#endif /*__native_client_codegen__*/
+
+/* Instructions are implicitly 64-bit, so don't generate REX for just the size. */
+#define amd64_push_reg(inst,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* Instruction is implicitly 64-bits so don't generate REX for just the size. */
+#define amd64_push_membase(inst,basereg,disp) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst, 0, 0, 0, (basereg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_pop_reg_body(inst,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#if defined(__default_codegen__)
+
+/* call *reg (0xff /2). */
+#define amd64_call_reg(inst,reg) \
+ do { \
+ amd64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \
+ } while (0)
+
+
+#define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+#define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+
+#define amd64_pop_reg(inst,reg) amd64_pop_reg_body((inst), (reg))
+
+#elif defined(__native_client_codegen__)
+
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_reg_size(inst,reg,size) \
+ do { \
+ amd64_codegen_pre((inst)); \
+ amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+ amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+ amd64_emit_rex ((inst),0,0,0,(reg)); \
+ x86_jump_reg((inst),((reg)&0x7)); \
+ amd64_codegen_post((inst)); \
+ } while (0)
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_mem_size(inst,mem,size) \
+ do { \
+ amd64_codegen_pre((inst)); \
+ amd64_mov_reg_mem((inst), (mem), AMD64_R11, 4); \
+ amd64_jump_reg_size((inst), AMD64_R11, 4); \
+ amd64_codegen_post((inst)); \
+ } while (0)
+
+/* Sandboxed indirect call: mask the target to the bundle alignment,
+ * rebase it on R15, then call through the register. */
+#define amd64_call_reg_internal(inst,reg) \
+ do { \
+ amd64_codegen_pre((inst)); \
+ amd64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+ amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+ amd64_emit_rex((inst), 0, 0, 0, (reg)); \
+ x86_call_reg((inst), ((reg) & 0x7)); \
+ amd64_codegen_post((inst)); \
+ } while (0)
+
+#define amd64_call_reg(inst,reg) \
+ do { \
+ amd64_codegen_pre((inst)); \
+ amd64_call_sequence_pre(inst); \
+ amd64_call_reg_internal((inst), (reg)); \
+ amd64_call_sequence_post(inst); \
+ amd64_codegen_post((inst)); \
+ } while (0)
+
+
+/* NaCl ret: pop the return address and jump to it through the sandboxed
+ * indirect-jump sequence (a plain 0xc3 ret is not allowed). */
+#define amd64_ret(inst) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_pop_reg_body((inst), AMD64_R11); \
+ amd64_jump_reg_size((inst), AMD64_R11, 8); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* NaCl leave: restore RSP from RBP, then pop the saved RBP via R11 and
+ * rebase its low 32 bits on R15 (RBP may not be written directly). */
+#define amd64_leave(inst) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_mov_reg_reg((inst), AMD64_RSP, AMD64_RBP, 8); \
+ amd64_pop_reg_body((inst), AMD64_R11); \
+ amd64_mov_reg_reg_size((inst), AMD64_RBP, AMD64_R11, 4); \
+ amd64_alu_reg_reg_size((inst), X86_ADD, AMD64_RBP, AMD64_R15, 8); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_pop_reg(inst,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if (amd64_is_nacl_stack_reg((reg))) { \
+ amd64_pop_reg_body((inst), AMD64_R11); \
+ amd64_mov_reg_reg_size((inst), (reg), AMD64_R11, 4); \
+ amd64_alu_reg_reg_size((inst), X86_ADD, (reg), AMD64_R15, 8); \
+ } else { \
+ amd64_pop_reg_body((inst), (reg)); \
+ } \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#endif /*__native_client_codegen__*/
+
+/* Scalar SSE moves: 0xf2 prefix = movsd (double), 0xf3 = movss (float);
+ * opcode 0x0f 0x10 loads into the XMM reg, 0x0f 0x11 stores from it. */
+#define amd64_movsd_reg_regp(inst,reg,regp) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movsd_regp_reg(inst,regp,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movss_reg_regp(inst,reg,regp) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movss_regp_reg(inst,regp,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movsd_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movss_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_movss_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* The original inc_reg opcode is used as the REX prefix */
+/* (inc/dec must therefore be encoded as 0xff /0 and 0xff /1 on amd64). */
+#define amd64_inc_reg_size(inst,reg,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex ((inst),(size),0,0,(reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst),0,(reg) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define amd64_dec_reg_size(inst,reg,size) \
+ do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex ((inst),(size),0,0,(reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst),1,(reg) & 0x7); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+/* x87 load from [basereg + disp]: fld m64 (0xdd) or m32 (0xd9). */
+#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex ((inst),0,0,0,(basereg)); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ amd64_membase_emit ((inst), 0, (basereg), (disp)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+#if defined (__default_codegen__)
+
+/* From the AMD64 Software Optimization Manual */
+#define amd64_padding_size(inst,size) \
+ do { \
+ switch ((size)) { \
+ case 1: *(inst)++ = 0x90; break; \
+ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+ case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+ default: amd64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \
+ }; \
+ } while (0)
+
+/* Indirect call/jump through [basereg + disp]: 0xff /2 and 0xff /4. */
+#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0)
+#define amd64_jump_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst), 4, (basereg), (disp)); } while (0)
+
+/* Direct jump: rel32 when the target is within +/-2GB, otherwise an
+ * RIP-relative indirect jump through an inline 64-bit target slot. */
+#define amd64_jump_code_size(inst,target,size) do { \
+ if (amd64_is_imm32 ((gint64)(target) - (gint64)(inst))) { \
+ x86_jump_code((inst),(target)); \
+ } else { \
+ amd64_jump_membase ((inst), AMD64_RIP, 0); \
+ *(guint64*)(inst) = (guint64)(target); \
+ (inst) += 8; \
+ } \
+} while (0)
+
+#elif defined(__native_client_codegen__)
+
+/* The 3-7 byte NOP sequences in amd64_padding_size below are all illegal in */
+/* 64-bit Native Client because they load into rSP/rBP or use duplicate */
+/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */
+/* Intel64 and IA-32 Architectures Optimization Reference Manual and */
+/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */
+
+#define amd64_padding_size(inst,size) \
+ do { \
+ unsigned char *code_start = (inst); \
+ switch ((size)) { \
+ /* xchg %eax,%eax, recognized by hardware as a NOP */ \
+ case 1: *(inst)++ = 0x90; break; \
+ /* xchg %ax,%ax */ \
+ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \
+ break; \
+ /* nop (%rax) */ \
+ case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ *(inst)++ = 0x00; \
+ break; \
+ /* nop 0x0(%rax) */ \
+ case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, AMD64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nop 0x0(%rax,%rax) */ \
+ case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, 4); \
+ x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nopw 0x0(%rax,%rax) */ \
+ case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \
+ *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, 4); \
+ x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nop 0x0(%rax) (32-bit displacement) */ \
+ case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 2, 0, AMD64_RAX); \
+ x86_imm_emit32((inst), 0); \
+ break; \
+ /* nop 0x0(%rax,%rax) (32-bit displacement) */ \
+ case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 2, 0, 4); \
+ x86_address_byte ((inst), 0, AMD64_RAX, AMD64_RAX); \
+ x86_imm_emit32 ((inst), 0); \
+ break; \
+ default: \
+ g_assert_not_reached(); \
+ } \
+ g_assert(code_start + (size) == (unsigned char *)(inst)); \
+ } while (0)
+
+
+/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
+#define amd64_call_membase_size(inst,basereg,disp,size) \
+ do { \
+ amd64_codegen_pre((inst)); \
+ amd64_call_sequence_pre(inst); \
+ amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
+ amd64_call_reg_internal((inst), AMD64_R11); \
+ amd64_call_sequence_post(inst); \
+ amd64_codegen_post((inst)); \
+ } while (0)
+
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_jump_membase_size(inst,basereg,disp,size) \
+ do { \
+ amd64_mov_reg_membase((inst), AMD64_R11, (basereg), (disp), 4); \
+ amd64_jump_reg_size((inst), AMD64_R11, 4); \
+ } while (0)
+
+/* On Native Client we can't jump more than INT_MAX in either direction */
+#define amd64_jump_code_size(inst,target,size) \
+ do { \
+ /* x86_jump_code used twice in case of */ \
+ /* relocation by amd64_codegen_post */ \
+ guint8* jump_start; \
+ amd64_codegen_pre(inst); \
+ assert(amd64_is_imm32 ((gint64)(target) - (gint64)(inst))); \
+ x86_jump_code((inst),(target)); \
+ inst = amd64_codegen_post(inst); \
+ jump_start = (inst); \
+ x86_jump_code((inst),(target)); \
+ mono_amd64_patch(jump_start, (target)); \
+} while (0)
+
+#endif /*__native_client_codegen__*/
+
+/*
+ * SSE
+ */
+
+//TODO Reorganize SSE opcode defines.
+
+/* Two opcode SSE defines */
+
+/*
+ * Emit a two-byte-opcode SSE instruction (escape byte op1, opcode op2) with
+ * no mandatory prefix, register/register form.  'size' is forwarded to
+ * amd64_emit_rex (8 selects a 64-bit operation via REX.W).
+ */
+#define emit_sse_reg_reg_op2_size(inst,dreg,reg,op1,op2,size) do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/* Common case: default (32-bit) operand size, no REX.W. */
+#define emit_sse_reg_reg_op2(inst,dreg,reg,op1,op2) emit_sse_reg_reg_op2_size ((inst), (dreg), (reg), (op1), (op2), 0)
+
+/* As emit_sse_reg_reg_op2, followed by an 8-bit immediate (shufps, cmpps...). */
+#define emit_sse_reg_reg_op2_imm(inst,dreg,reg,op1,op2,imm) do { \
+ amd64_codegen_pre(inst); \
+ emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/*
+ * Two-byte-opcode SSE, memory-operand form with xmm 'reg' as the ModRM reg
+ * field and [basereg + disp] as the r/m operand (store direction for the
+ * opcodes used with this macro).
+ */
+#define emit_sse_membase_reg_op2(inst,basereg,disp,reg,op1,op2) do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/*
+ * Same shape, load direction: [basereg + disp] into xmm 'dreg'.  A RIP base
+ * passes 0 to amd64_emit_rex since RIP-relative needs no REX.B extension.
+ */
+#define emit_sse_reg_membase_op2(inst,dreg,basereg,disp,op1,op2) do { \
+ amd64_codegen_pre(inst); \
+ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/* Three opcode SSE defines */
+
+/* Emit three raw opcode bytes (helper; no REX or prefix handling). */
+#define emit_opcode3(inst,op1,op2,op3) do { \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+} while (0)
+
+/*
+ * Emit a three-byte SSE instruction (mandatory prefix op1, escape byte op2,
+ * opcode op3), register/register form.  'size' is forwarded to amd64_emit_rex
+ * (8 selects a 64-bit operation via REX.W).
+ *
+ * The mandatory prefix goes through x86_prefix — consistent with the membase
+ * variants below — instead of a raw byte store, so prefix accounting works
+ * under the Native Client codegen (identical byte output otherwise).
+ */
+#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/* Common case: default (32-bit) operand size, no REX.W. */
+#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0)
+
+/* As emit_sse_reg_reg, followed by an 8-bit immediate (pshufd, pinsrw...). */
+#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \
+ amd64_codegen_pre(inst); \
+ emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/*
+ * Three-byte SSE (prefix + escape + opcode), memory form with xmm 'reg' in
+ * the ModRM reg field and [basereg + disp] as r/m (store direction for the
+ * opcodes used with this macro).
+ */
+#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ amd64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/*
+ * Same shape, load direction: [basereg + disp] into xmm 'dreg'.  A RIP base
+ * passes 0 to amd64_emit_rex since RIP-relative needs no REX.B extension.
+ */
+#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ amd64_emit_rex ((inst), 0, (dreg), 0, (basereg) == AMD64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ amd64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/* Four opcode SSE defines */
+
+/*
+ * Four-byte SSE (prefix op1 + two escape bytes op2/op3 + opcode op4),
+ * register/register form — used for the SSE4.1/4.2 0F 38 opcode map.
+ * 'size' is forwarded to amd64_emit_rex (8 selects REX.W).
+ */
+#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \
+ amd64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ amd64_emit_rex ((inst), size, (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ *(inst)++ = (unsigned char)(op4); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ amd64_codegen_post(inst); \
+} while (0)
+
+/* Common case: default (32-bit) operand size, no REX.W. */
+#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0)
+
+/* specific SSE opcode defines */
+
+/* Packed-double logicals and scalar-double (movsd/movss, arithmetic,
+ * compares, conversions).  Opcode triples are (mandatory prefix, 0F escape,
+ * opcode) as listed in the Intel/AMD opcode maps. */
+#define amd64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57)
+
+#define amd64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57)
+
+#define amd64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54)
+
+#define amd64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10)
+
+#define amd64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10)
+
+#define amd64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11)
+
+#define amd64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11)
+
+#define amd64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10)
+
+#define amd64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f)
+
+#define amd64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f)
+
+#define amd64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e)
+
+/* Conversions to/from integer registers default to size 8 (64-bit GPR). */
+#define amd64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8)
+
+#define amd64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size))
+
+#define amd64_sse_cvttsd2si_reg_reg(inst,dreg,reg) amd64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define amd64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size))
+
+#define amd64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) amd64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define amd64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a)
+
+#define amd64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a)
+
+#define amd64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58)
+
+#define amd64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c)
+
+#define amd64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59)
+
+#define amd64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
+
+#define amd64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51)
+
+
+#define amd64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
+
+#define amd64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm))
+
+
+#define amd64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size))
+
+
+/* Packed-single arithmetic/logicals (no mandatory prefix, plain 0F map),
+ * SSE3 horizontal ops, and the pshuf*/shufp* shuffles. */
+#define amd64_sse_addps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58)
+
+#define amd64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e)
+
+#define amd64_sse_mulps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59)
+
+#define amd64_sse_subps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c)
+
+#define amd64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
+
+#define amd64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
+
+/* 'imm' selects the comparison predicate (eq/lt/le/...). */
+#define amd64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm))
+
+#define amd64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54)
+
+#define amd64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55)
+
+#define amd64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56)
+
+#define amd64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57)
+
+#define amd64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51)
+
+#define amd64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52)
+
+#define amd64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53)
+
+#define amd64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0)
+
+#define amd64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c)
+
+#define amd64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d)
+
+#define amd64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16)
+
+#define amd64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12)
+
+
+#define amd64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm))
+
+#define amd64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm))
+
+#define amd64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm))
+
+#define amd64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm))
+
+#define amd64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm))
+
+
+/* Packed-double arithmetic/logicals (66 prefix, 0F map). */
+#define amd64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58)
+
+#define amd64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e)
+
+#define amd64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59)
+
+#define amd64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c)
+
+#define amd64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f)
+
+#define amd64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d)
+
+/* 'imm' selects the comparison predicate (eq/lt/le/...). */
+#define amd64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm))
+
+#define amd64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54)
+
+#define amd64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55)
+
+#define amd64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56)
+
+#define amd64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51)
+
+/* NOTE(review): there are no rsqrtpd/rcppd instructions in SSE2 — 66 0F 52
+ * and 66 0F 53 are not defined encodings.  Confirm these two macros are
+ * never emitted before relying on them. */
+#define amd64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52)
+
+#define amd64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53)
+
+#define amd64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0)
+
+#define amd64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c)
+
+#define amd64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d)
+
+#define amd64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12)
+
+
+/* Integer SIMD: mask extraction, logicals, add/sub, min/max (SSE2 forms use
+ * the 0F map; the SSE4.1/4.2 forms go through the 66 0F 38 map), compares,
+ * unpacks and packs. */
+#define amd64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7)
+
+
+#define amd64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb)
+
+#define amd64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb)
+
+#define amd64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef)
+
+
+#define amd64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc)
+
+#define amd64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd)
+
+#define amd64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe)
+
+#define amd64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4)
+
+
+#define amd64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8)
+
+#define amd64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9)
+
+#define amd64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa)
+
+#define amd64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb)
+
+
+#define amd64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde)
+
+#define amd64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e)
+
+#define amd64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f)
+
+
+#define amd64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c)
+
+#define amd64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee)
+
+#define amd64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d)
+
+
+#define amd64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0)
+
+#define amd64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define amd64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda)
+
+#define amd64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a)
+
+#define amd64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b)
+
+
+#define amd64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38)
+
+#define amd64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea)
+
+#define amd64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39)
+
+
+#define amd64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74)
+
+#define amd64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75)
+
+#define amd64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76)
+
+#define amd64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29)
+
+
+#define amd64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64)
+
+#define amd64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65)
+
+#define amd64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
+
+#define amd64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
+
+
+#define amd64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
+
+
+#define amd64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
+
+#define amd64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
+
+#define amd64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
+
+#define amd64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
+
+#define amd64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
+
+#define amd64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
+
+
+#define amd64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
+
+#define amd64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
+
+#define amd64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
+
+#define amd64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
+
+#define amd64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
+
+#define amd64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
+
+
+#define amd64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
+
+#define amd64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
+
+#define amd64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
+
+#define amd64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
+
+
+/* Saturating unsigned/signed byte and word add/subtract (66 0F map). */
+#define amd64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
+
+#define amd64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
+
+#define amd64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
+
+/* Fix: PSUBUSW is 66 0F D9.  This macro previously emitted 0xd8, which is
+ * PSUBUSB (byte, not word, subtract) — a silent wrong-instruction bug. */
+#define amd64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd9)
+
+
+#define amd64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
+
+#define amd64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
+
+#define amd64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
+
+#define amd64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
+
+
+/* Packed multiplies (pmulld is SSE4.1, via the 66 0F 38 map). */
+#define amd64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
+
+#define amd64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
+
+#define amd64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
+
+#define amd64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
+
+#define amd64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5)
+
+
+/* Packed shifts.  The immediate forms encode the shift kind
+ * (X86_SSE_SHR/SAR/SHL) as an opcode extension in the ModRM reg field,
+ * which is why it is passed in the 'dreg' slot of emit_sse_reg_reg_imm. */
+#define amd64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define amd64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1)
+
+
+#define amd64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define amd64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1)
+
+
+#define amd64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define amd64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1)
+
+
+#define amd64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define amd64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2)
+
+
+#define amd64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define amd64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2)
+
+
+#define amd64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define amd64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2)
+
+
+#define amd64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define amd64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3)
+
+
+/* NOTE(review): SSE2 has no psraq — 66 0F 73 defines only /2 (psrlq) and
+ * /6 (psllq), and 66 0F E3 actually encodes pavgw.  Confirm these two
+ * macros are unused before relying on them. */
+#define amd64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define amd64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define amd64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define amd64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3)
+
+
+/* Packed int <-> float conversions (cvtt* variants truncate). */
+#define amd64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6)
+
+#define amd64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B)
+
+#define amd64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6)
+
+#define amd64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A)
+
+#define amd64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B)
+
+#define amd64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A)
+
+#define amd64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6)
+
+#define amd64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B)
+
+
+/* GPR <-> XMM moves; 'size' feeds amd64_emit_rex, so size 8 presumably
+ * selects the 64-bit (movq) form via REX.W — confirm against amd64_emit_rex. */
+#define amd64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size))
+
+#define amd64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size))
+
+#define amd64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e)
+
+
+#define amd64_movlhps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
+
+#define amd64_movhlps_reg_reg(inst,dreg,sreg) emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
+
+
+/* Full 128-bit loads/stores (movups unaligned, movaps aligned, movntps
+ * non-temporal store; prefetch reuses the load emitter with the hint in
+ * the reg-field slot). */
+#define amd64_sse_movups_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
+
+#define amd64_sse_movups_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
+
+#define amd64_sse_movaps_membase_reg(inst, basereg, disp, reg) emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
+
+#define amd64_sse_movaps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
+
+#define amd64_sse_movaps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
+
+#define amd64_sse_movntps_reg_membase(inst, dreg, basereg, disp) emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x2b)
+
+#define amd64_sse_prefetch_reg_membase(inst, arg, basereg, disp) emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
+
+/* Generated from x86-codegen.h */
+
+/* Thin amd64 wrappers over the x86-codegen.h emitters: emit a REX prefix
+ * sized by 'size' (8 => REX.W), then the legacy 32-bit emitter with the
+ * register numbers masked to their low 3 bits (the high bit lives in REX). */
+#define amd64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0)
+#define amd64_cld_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_cld(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_stosb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_stosl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_stosd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_movsb_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_movsl_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_movsd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0)
+#define amd64_rdtsc_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+/* xchg: the legacy emitter only distinguishes byte vs dword forms, so size 8
+ * is presumably mapped to 4 because REX.W already widens it — TODO confirm. */
+#define amd64_xchg_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xchg_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+/* inc/dec/not/neg, nop, ALU and test wrappers; same REX-then-legacy-emitter
+ * pattern as above.  The commented-out variants are defined elsewhere. */
+#define amd64_inc_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_inc_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+//#define amd64_inc_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_dec_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_dec_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+//#define amd64_dec_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_not_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_not_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_not_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_neg_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_neg_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_neg_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_nop_size(inst,size) do { amd64_codegen_pre(inst); x86_nop(inst); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_mem_reg_size(inst,opc,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); amd64_codegen_post(inst); } while (0)
+#define amd64_alu_reg_mem_size(inst,opc,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+//#define amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_test_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_reg_imm_size(inst,opc,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+/*
+ * Shift by immediate / by %cl on a memory operand or register.
+ *
+ * Fix: the mem and membase forms referenced a macro argument `reg` that is
+ * not in their parameter lists, which made any expansion a compile error.
+ * No register operand is involved in these forms, so the REX "reg" input
+ * must be 0 (the membase forms still pass basereg so REX.B is emitted when
+ * the base register is R8-R15).
+ */
+#define amd64_shift_mem_imm_size(inst,opc,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+/* Shift register by %cl. */
+#define amd64_shift_reg_size(inst,opc,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+/* Shift absolute-address memory operand by %cl (fixed: formerly used undefined `reg`). */
+#define amd64_shift_mem_size(inst,opc,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); amd64_codegen_post(inst); } while (0)
+/*
+ * Shift-membase by %cl, double-precision shifts (SHRD/SHLD), and the
+ * MUL/IMUL/DIV family.  Same pattern throughout: REX prefix first, then the
+ * 32-bit emitter with register numbers masked to 3 bits.
+ */
+#define amd64_shift_membase_size(inst,opc,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); amd64_codegen_post(inst); } while (0)
+/* One-operand multiply/divide (implicit rax/rdx operands). */
+#define amd64_mul_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_mul_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+/* Two-operand / three-operand IMUL forms. */
+#define amd64_imul_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_reg_size(inst,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_mem_size(inst,mem,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_div_membase_size(inst,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+/*
+ * MOV / LEA / sign-zero-extend (widen) forms.  The underlying x86 emitters
+ * only understand operand sizes 1/2/4, so `(size) == 8 ? 4 : (size)` maps a
+ * 64-bit request down to the 4-byte opcode: the 64-bit width is already
+ * encoded in the REX.W prefix emitted by amd64_emit_rex just before.
+ * Many variants are commented out; presumably custom definitions exist
+ * elsewhere in the file (e.g. to emit RIP-relative forms) — TODO confirm.
+ */
+#define amd64_mov_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_regp_reg_size(inst,regp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+/* Memindex forms: indexreg goes into the REX.X position. */
+#define amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+/* Zero a register (xor reg,reg underneath). */
+#define amd64_clear_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_reg_imm_size(inst,reg,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_mem_imm_size(inst,mem,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+//#define amd64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_lea_mem_size(inst,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+//#define amd64_lea_membase_size(inst,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0)
+/* MOVSX/MOVZX family: is_signed selects sign vs zero extension, is_half selects 16- vs 8-bit source. */
+#define amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+#define amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+#define amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+#define amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); amd64_codegen_post(inst); } while (0)
+/* CDQ/CQO (sign-extend rax into rdx) and FWAIT; width comes from `size`. */
+#define amd64_cdq_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_wait_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); amd64_codegen_post(inst); } while (0)
+/*
+ * x87 floating-point macros.  These pass width 0 to amd64_emit_rex: x87
+ * opcodes take no REX.W, so the REX call only contributes the REX.B bit
+ * when a membase form uses an extended base register.  `size` is accepted
+ * but unused, to keep the macro shapes uniform with the integer ops.
+ */
+#define amd64_fp_op_mem_size(inst,opc,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_op_size(inst,opc,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); amd64_codegen_post(inst); } while (0)
+#define amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0)
+#define amd64_fstp_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fcompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fucompp_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fnstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fnstcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
+/* NOTE(review): passes (size) where the sibling x87 macros pass 0 — a stray REX.W here is likely harmless but inconsistent; confirm intent. */
+#define amd64_fnstcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fldcw_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_fldcw_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fchs_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_frem_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_frem(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fxch_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fcomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fcomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fucomi_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fucomip_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fld_size(inst,mem,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); amd64_codegen_post(inst); } while (0)
+//#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); amd64_codegen_post(inst); } while (0)
+#define amd64_fld80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+/* NOTE(review): passes (size) unlike sibling fld80_mem above — confirm intent. */
+#define amd64_fld80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fild_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
+#define amd64_fild_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
+#define amd64_fld_reg_size(inst,index,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); amd64_codegen_post(inst); } while (0)
+#define amd64_fldz_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fld1_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fldpi_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fst_size(inst,mem,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
+#define amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); amd64_codegen_post(inst); } while (0)
+#define amd64_fst80_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_fst80_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_fist_pop_size(inst,mem,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); amd64_codegen_post(inst); } while (0)
+#define amd64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); amd64_codegen_post(inst); } while (0)
+/* NOTE(review): passes (size) unlike the other fnstsw-style macros — confirm intent. */
+#define amd64_fstsw_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fist_membase_size(inst,basereg,disp,is_int,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); amd64_codegen_post(inst); } while (0)
+/*
+ * Stack push/pop, PUSHAD/POPF-style macros, LOOP variants and direct/indirect
+ * jumps.  Commented-out variants presumably get custom definitions elsewhere
+ * in the file — TODO confirm.
+ */
+//#define amd64_push_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_push_regp_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_push_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+//#define amd64_push_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); amd64_codegen_post(inst); } while (0)
+/* push imm: width 0 -> no REX emitted for this form. */
+#define amd64_push_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
+//#define amd64_pop_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_pop_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_pop_membase_size(inst,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_pushad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_pushfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_popad_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_popfd_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_loop_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_loope_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_loopne_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_jump32_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_jump8_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); amd64_codegen_post(inst); } while (0)
+#if !defined( __native_client_codegen__ )
+/* Defined above for Native Client, so they can be used in other macros */
+#define amd64_jump_reg_size(inst,reg,size) do { amd64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0)
+#define amd64_jump_mem_size(inst,mem,size) do { amd64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0)
+#endif
+#define amd64_jump_disp_size(inst,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); amd64_codegen_post(inst); } while (0)
+/*
+ * Conditional branches, SETcc, and indirect calls.  The 8/32-bit branch
+ * forms need no REX at all and go straight to the x86 emitters.
+ */
+#define amd64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0)
+#define amd64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0)
+#define amd64_branch_size_body(inst,cond,target,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); amd64_codegen_post(inst); } while (0)
+#if defined(__default_codegen__)
+#define amd64_branch_size(inst,cond,target,is_signed,size) do { amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0)
+#elif defined(__native_client_codegen__)
+/* NaCl: emit twice — the first pass may be relocated by amd64_codegen_post,
+ * the second pass re-emits at the final position and is then patched. */
+#define amd64_branch_size(inst,cond,target,is_signed,size) \
+	do { \
+		/* amd64_branch_size_body used twice in */ \
+		/* case of relocation by amd64_codegen_post */ \
+		guint8* branch_start; \
+		amd64_codegen_pre(inst); \
+		amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
+		inst = amd64_codegen_post(inst); \
+		branch_start = inst; \
+		amd64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
+		mono_amd64_patch(branch_start, (target)); \
+	} while (0)
+#endif
+
+#define amd64_branch_disp_size(inst,cond,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+/* SETcc writes a single byte, hence the fixed width 1 in the REX call. */
+#define amd64_set_reg_size(inst,cond,reg,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_set_mem_size(inst,cond,mem,is_signed,size) do { amd64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); amd64_codegen_post(inst); } while (0)
+#define amd64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); amd64_codegen_post(inst); } while (0)
+//#define amd64_call_reg_size(inst,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_call_mem_size(inst,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); amd64_codegen_post(inst); } while (0)
+
+/*
+ * Direct calls.  The default backend emits a plain rel32 call; the Native
+ * Client backend brackets the call with the bundling pre/post hooks and
+ * re-emits so the final offset is computed from the adjusted position.
+ */
+#if defined(__default_codegen__)
+
+#define amd64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0)
+#define amd64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0)
+
+#elif defined(__native_client_codegen__)
+/* Size is ignored for Native Client calls, we restrict jumping to 32-bits */
+#define amd64_call_imm_size(inst,disp,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		amd64_call_sequence_pre((inst)); \
+		x86_call_imm((inst),(disp)); \
+		amd64_call_sequence_post((inst)); \
+		amd64_codegen_post((inst)); \
+	} while (0)
+
+/* x86_call_code is called twice below, first so we can get the size of the */
+/* call sequence, and again so the exact offset from "inst" is used, since */
+/* the sequence could have moved from amd64_call_sequence_post. */
+/* Size is ignored for Native Client jumps, we restrict jumping to 32-bits */
+#define amd64_call_code_size(inst,target,size) \
+	do { \
+		amd64_codegen_pre((inst)); \
+		guint8* adjusted_start; \
+		guint8* call_start; \
+		amd64_call_sequence_pre((inst)); \
+		x86_call_code((inst),(target)); \
+		adjusted_start = amd64_call_sequence_post((inst)); \
+		call_start = adjusted_start; \
+		x86_call_code(adjusted_start, (target)); \
+		amd64_codegen_post((inst)); \
+		mono_amd64_patch(call_start, (target)); \
+	} while (0)
+
+#endif /*__native_client_codegen__*/
+
+//#define amd64_ret_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_ret_imm_size(inst,imm,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); amd64_codegen_post(inst); } while (0)
+#define amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); amd64_codegen_post(inst); } while (0)
+#define amd64_enter_size(inst,framesize) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); amd64_codegen_post(inst); } while (0)
+//#define amd64_leave_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_sahf_size(inst,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fsin_size(inst,size) do { amd64_codegen_pre(inst); x86_fsin(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fcos_size(inst,size) do { amd64_codegen_pre(inst); x86_fcos(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fabs_size(inst,size) do { amd64_codegen_pre(inst); x86_fabs(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_ftst_size(inst,size) do { amd64_codegen_pre(inst); x86_ftst(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fxam_size(inst,size) do { amd64_codegen_pre(inst); x86_fxam(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fpatan_size(inst,size) do { amd64_codegen_pre(inst); x86_fpatan(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fprem_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fprem1_size(inst,size) do { amd64_codegen_pre(inst); x86_fprem1(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_frndint_size(inst,size) do { amd64_codegen_pre(inst); x86_frndint(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fsqrt_size(inst,size) do { amd64_codegen_pre(inst); x86_fsqrt(inst); amd64_codegen_post(inst); } while (0)
+#define amd64_fptan_size(inst,size) do { amd64_codegen_pre(inst); x86_fptan(inst); amd64_codegen_post(inst); } while (0)
+//#define amd64_padding_size(inst,size) do { amd64_codegen_pre(inst); x86_padding((inst),(size)); amd64_codegen_post(inst); } while (0)
+#define amd64_prolog_size(inst,frame_size,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); amd64_codegen_post(inst); } while (0)
+#define amd64_epilog_size(inst,reg_mask,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); amd64_codegen_post(inst); } while (0)
+#define amd64_xadd_reg_reg_size(inst,dreg,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xadd_mem_reg_size(inst,mem,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); amd64_codegen_post(inst); } while (0)
+#define amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { amd64_codegen_pre(inst); amd64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); amd64_codegen_post(inst); } while (0)
+
+
+
+
+/*
+ * Convenience wrappers: the un-suffixed names default the operand size to 8
+ * (64-bit).  A few (xchg/xadd) keep an explicit size parameter and just
+ * forward it.  Commented-out wrappers presumably have custom definitions
+ * elsewhere in the file — TODO confirm.
+ */
+#define amd64_breakpoint(inst) amd64_breakpoint_size(inst,8)
+#define amd64_cld(inst) amd64_cld_size(inst,8)
+#define amd64_stosb(inst) amd64_stosb_size(inst,8)
+#define amd64_stosl(inst) amd64_stosl_size(inst,8)
+#define amd64_stosd(inst) amd64_stosd_size(inst,8)
+#define amd64_movsb(inst) amd64_movsb_size(inst,8)
+#define amd64_movsl(inst) amd64_movsl_size(inst,8)
+#define amd64_movsd(inst) amd64_movsd_size(inst,8)
+#define amd64_prefix(inst,p) amd64_prefix_size(inst,p,8)
+#define amd64_rdtsc(inst) amd64_rdtsc_size(inst,8)
+#define amd64_cmpxchg_reg_reg(inst,dreg,reg) amd64_cmpxchg_reg_reg_size(inst,dreg,reg,8)
+#define amd64_cmpxchg_mem_reg(inst,mem,reg) amd64_cmpxchg_mem_reg_size(inst,mem,reg,8)
+#define amd64_cmpxchg_membase_reg(inst,basereg,disp,reg) amd64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8)
+/* These keep an explicit size argument rather than defaulting to 8. */
+#define amd64_xchg_reg_reg(inst,dreg,reg,size) amd64_xchg_reg_reg_size(inst,dreg,reg,size)
+#define amd64_xchg_mem_reg(inst,mem,reg,size) amd64_xchg_mem_reg_size(inst,mem,reg,size)
+#define amd64_xchg_membase_reg(inst,basereg,disp,reg,size) amd64_xchg_membase_reg_size(inst,basereg,disp,reg,size)
+#define amd64_xadd_reg_reg(inst,dreg,reg,size) amd64_xadd_reg_reg_size(inst,dreg,reg,size)
+#define amd64_xadd_mem_reg(inst,mem,reg,size) amd64_xadd_mem_reg_size(inst,mem,reg,size)
+#define amd64_xadd_membase_reg(inst,basereg,disp,reg,size) amd64_xadd_membase_reg_size(inst,basereg,disp,reg,size)
+#define amd64_inc_mem(inst,mem) amd64_inc_mem_size(inst,mem,8)
+#define amd64_inc_membase(inst,basereg,disp) amd64_inc_membase_size(inst,basereg,disp,8)
+#define amd64_inc_reg(inst,reg) amd64_inc_reg_size(inst,reg,8)
+#define amd64_dec_mem(inst,mem) amd64_dec_mem_size(inst,mem,8)
+#define amd64_dec_membase(inst,basereg,disp) amd64_dec_membase_size(inst,basereg,disp,8)
+#define amd64_dec_reg(inst,reg) amd64_dec_reg_size(inst,reg,8)
+#define amd64_not_mem(inst,mem) amd64_not_mem_size(inst,mem,8)
+#define amd64_not_membase(inst,basereg,disp) amd64_not_membase_size(inst,basereg,disp,8)
+#define amd64_not_reg(inst,reg) amd64_not_reg_size(inst,reg,8)
+#define amd64_neg_mem(inst,mem) amd64_neg_mem_size(inst,mem,8)
+#define amd64_neg_membase(inst,basereg,disp) amd64_neg_membase_size(inst,basereg,disp,8)
+#define amd64_neg_reg(inst,reg) amd64_neg_reg_size(inst,reg,8)
+#define amd64_nop(inst) amd64_nop_size(inst,8)
+//#define amd64_alu_reg_imm(inst,opc,reg,imm) amd64_alu_reg_imm_size(inst,opc,reg,imm,8)
+#define amd64_alu_mem_imm(inst,opc,mem,imm) amd64_alu_mem_imm_size(inst,opc,mem,imm,8)
+#define amd64_alu_membase_imm(inst,opc,basereg,disp,imm) amd64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8)
+#define amd64_alu_mem_reg(inst,opc,mem,reg) amd64_alu_mem_reg_size(inst,opc,mem,reg,8)
+#define amd64_alu_membase_reg(inst,opc,basereg,disp,reg) amd64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8)
+//#define amd64_alu_reg_reg(inst,opc,dreg,reg) amd64_alu_reg_reg_size(inst,opc,dreg,reg,8)
+#define amd64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) amd64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8)
+#define amd64_alu_reg_mem(inst,opc,reg,mem) amd64_alu_reg_mem_size(inst,opc,reg,mem,8)
+#define amd64_alu_reg_membase(inst,opc,reg,basereg,disp) amd64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8)
+#define amd64_test_reg_imm(inst,reg,imm) amd64_test_reg_imm_size(inst,reg,imm,8)
+#define amd64_test_mem_imm(inst,mem,imm) amd64_test_mem_imm_size(inst,mem,imm,8)
+#define amd64_test_membase_imm(inst,basereg,disp,imm) amd64_test_membase_imm_size(inst,basereg,disp,imm,8)
+#define amd64_test_reg_reg(inst,dreg,reg) amd64_test_reg_reg_size(inst,dreg,reg,8)
+#define amd64_test_mem_reg(inst,mem,reg) amd64_test_mem_reg_size(inst,mem,reg,8)
+#define amd64_test_membase_reg(inst,basereg,disp,reg) amd64_test_membase_reg_size(inst,basereg,disp,reg,8)
+#define amd64_shift_reg_imm(inst,opc,reg,imm) amd64_shift_reg_imm_size(inst,opc,reg,imm,8)
+#define amd64_shift_mem_imm(inst,opc,mem,imm) amd64_shift_mem_imm_size(inst,opc,mem,imm,8)
+#define amd64_shift_membase_imm(inst,opc,basereg,disp,imm) amd64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8)
+#define amd64_shift_reg(inst,opc,reg) amd64_shift_reg_size(inst,opc,reg,8)
+#define amd64_shift_mem(inst,opc,mem) amd64_shift_mem_size(inst,opc,mem,8)
+#define amd64_shift_membase(inst,opc,basereg,disp) amd64_shift_membase_size(inst,opc,basereg,disp,8)
+#define amd64_shrd_reg(inst,dreg,reg) amd64_shrd_reg_size(inst,dreg,reg,8)
+#define amd64_shrd_reg_imm(inst,dreg,reg,shamt) amd64_shrd_reg_imm_size(inst,dreg,reg,shamt,8)
+#define amd64_shld_reg(inst,dreg,reg) amd64_shld_reg_size(inst,dreg,reg,8)
+#define amd64_shld_reg_imm(inst,dreg,reg,shamt) amd64_shld_reg_imm_size(inst,dreg,reg,shamt,8)
+#define amd64_mul_reg(inst,reg,is_signed) amd64_mul_reg_size(inst,reg,is_signed,8)
+#define amd64_mul_mem(inst,mem,is_signed) amd64_mul_mem_size(inst,mem,is_signed,8)
+#define amd64_mul_membase(inst,basereg,disp,is_signed) amd64_mul_membase_size(inst,basereg,disp,is_signed,8)
+#define amd64_imul_reg_reg(inst,dreg,reg) amd64_imul_reg_reg_size(inst,dreg,reg,8)
+#define amd64_imul_reg_mem(inst,reg,mem) amd64_imul_reg_mem_size(inst,reg,mem,8)
+#define amd64_imul_reg_membase(inst,reg,basereg,disp) amd64_imul_reg_membase_size(inst,reg,basereg,disp,8)
+#define amd64_imul_reg_reg_imm(inst,dreg,reg,imm) amd64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8)
+#define amd64_imul_reg_mem_imm(inst,reg,mem,imm) amd64_imul_reg_mem_imm_size(inst,reg,mem,imm,8)
+#define amd64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) amd64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8)
+#define amd64_div_reg(inst,reg,is_signed) amd64_div_reg_size(inst,reg,is_signed,8)
+#define amd64_div_mem(inst,mem,is_signed) amd64_div_mem_size(inst,mem,is_signed,8)
+#define amd64_div_membase(inst,basereg,disp,is_signed) amd64_div_membase_size(inst,basereg,disp,is_signed,8)
+//#define amd64_mov_mem_reg(inst,mem,reg,size) amd64_mov_mem_reg_size(inst,mem,reg,size)
+//#define amd64_mov_regp_reg(inst,regp,reg,size) amd64_mov_regp_reg_size(inst,regp,reg,size)
+//#define amd64_mov_membase_reg(inst,basereg,disp,reg,size) amd64_mov_membase_reg_size(inst,basereg,disp,reg,size)
+#define amd64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) amd64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size)
+//#define amd64_mov_reg_reg(inst,dreg,reg,size) amd64_mov_reg_reg_size(inst,dreg,reg,size)
+//#define amd64_mov_reg_mem(inst,reg,mem,size) amd64_mov_reg_mem_size(inst,reg,mem,size)
+//#define amd64_mov_reg_membase(inst,reg,basereg,disp,size) amd64_mov_reg_membase_size(inst,reg,basereg,disp,size)
+#define amd64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) amd64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size)
+#define amd64_clear_reg(inst,reg) amd64_clear_reg_size(inst,reg,8)
+//#define amd64_mov_reg_imm(inst,reg,imm) amd64_mov_reg_imm_size(inst,reg,imm,8)
+#define amd64_mov_mem_imm(inst,mem,imm,size) amd64_mov_mem_imm_size(inst,mem,imm,size)
+//#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) amd64_mov_membase_imm_size(inst,basereg,disp,imm,size)
+#define amd64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) amd64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size)
+#define amd64_lea_mem(inst,reg,mem) amd64_lea_mem_size(inst,reg,mem,8)
+//#define amd64_lea_membase(inst,reg,basereg,disp) amd64_lea_membase_size(inst,reg,basereg,disp,8)
+#define amd64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) amd64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8)
+#define amd64_widen_reg(inst,dreg,reg,is_signed,is_half) amd64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8)
+#define amd64_widen_mem(inst,dreg,mem,is_signed,is_half) amd64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8)
+#define amd64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) amd64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8)
+#define amd64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) amd64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8)
+#define amd64_cdq(inst) amd64_cdq_size(inst,8)
+#define amd64_wait(inst) amd64_wait_size(inst,8)
+#define amd64_fp_op_mem(inst,opc,mem,is_double) amd64_fp_op_mem_size(inst,opc,mem,is_double,8)
+#define amd64_fp_op_membase(inst,opc,basereg,disp,is_double) amd64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8)
+#define amd64_fp_op(inst,opc,index) amd64_fp_op_size(inst,opc,index,8)
+#define amd64_fp_op_reg(inst,opc,index,pop_stack) amd64_fp_op_reg_size(inst,opc,index,pop_stack,8)
+#define amd64_fp_int_op_membase(inst,opc,basereg,disp,is_int) amd64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8)
+#define amd64_fstp(inst,index) amd64_fstp_size(inst,index,8)
+#define amd64_fcompp(inst) amd64_fcompp_size(inst,8)
+#define amd64_fucompp(inst) amd64_fucompp_size(inst,8)
+#define amd64_fnstsw(inst) amd64_fnstsw_size(inst,8)
+#define amd64_fnstcw(inst,mem) amd64_fnstcw_size(inst,mem,8)
+#define amd64_fnstcw_membase(inst,basereg,disp) amd64_fnstcw_membase_size(inst,basereg,disp,8)
+#define amd64_fldcw(inst,mem) amd64_fldcw_size(inst,mem,8)
+#define amd64_fldcw_membase(inst,basereg,disp) amd64_fldcw_membase_size(inst,basereg,disp,8)
+#define amd64_fchs(inst) amd64_fchs_size(inst,8)
+#define amd64_frem(inst) amd64_frem_size(inst,8)
+#define amd64_fxch(inst,index) amd64_fxch_size(inst,index,8)
+#define amd64_fcomi(inst,index) amd64_fcomi_size(inst,index,8)
+#define amd64_fcomip(inst,index) amd64_fcomip_size(inst,index,8)
+#define amd64_fucomi(inst,index) amd64_fucomi_size(inst,index,8)
+#define amd64_fucomip(inst,index) amd64_fucomip_size(inst,index,8)
+#define amd64_fld(inst,mem,is_double) amd64_fld_size(inst,mem,is_double,8)
+#define amd64_fld_membase(inst,basereg,disp,is_double) amd64_fld_membase_size(inst,basereg,disp,is_double,8)
+#define amd64_fld80_mem(inst,mem) amd64_fld80_mem_size(inst,mem,8)
+#define amd64_fld80_membase(inst,basereg,disp) amd64_fld80_membase_size(inst,basereg,disp,8)
+#define amd64_fild(inst,mem,is_long) amd64_fild_size(inst,mem,is_long,8)
+#define amd64_fild_membase(inst,basereg,disp,is_long) amd64_fild_membase_size(inst,basereg,disp,is_long,8)
+#define amd64_fld_reg(inst,index) amd64_fld_reg_size(inst,index,8)
+#define amd64_fldz(inst) amd64_fldz_size(inst,8)
+#define amd64_fld1(inst) amd64_fld1_size(inst,8)
+#define amd64_fldpi(inst) amd64_fldpi_size(inst,8)
+#define amd64_fst(inst,mem,is_double,pop_stack) amd64_fst_size(inst,mem,is_double,pop_stack,8)
+#define amd64_fst_membase(inst,basereg,disp,is_double,pop_stack) amd64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8)
+#define amd64_fst80_mem(inst,mem) amd64_fst80_mem_size(inst,mem,8)
+#define amd64_fst80_membase(inst,basereg,disp) amd64_fst80_membase_size(inst,basereg,disp,8)
+#define amd64_fist_pop(inst,mem,is_long) amd64_fist_pop_size(inst,mem,is_long,8)
+#define amd64_fist_pop_membase(inst,basereg,disp,is_long) amd64_fist_pop_membase_size(inst,basereg,disp,is_long,8)
+#define amd64_fstsw(inst) amd64_fstsw_size(inst,8)
+#define amd64_fist_membase(inst,basereg,disp,is_int) amd64_fist_membase_size(inst,basereg,disp,is_int,8)
+//#define amd64_push_reg(inst,reg) amd64_push_reg_size(inst,reg,8)
+#define amd64_push_regp(inst,reg) amd64_push_regp_size(inst,reg,8)
+#define amd64_push_mem(inst,mem) amd64_push_mem_size(inst,mem,8)
+//#define amd64_push_membase(inst,basereg,disp) amd64_push_membase_size(inst,basereg,disp,8)
+#define amd64_push_memindex(inst,basereg,disp,indexreg,shift) amd64_push_memindex_size(inst,basereg,disp,indexreg,shift,8)
+#define amd64_push_imm(inst,imm) amd64_push_imm_size(inst,imm,8)
+//#define amd64_pop_reg(inst,reg) amd64_pop_reg_size(inst,reg,8)
+#define amd64_pop_mem(inst,mem) amd64_pop_mem_size(inst,mem,8)
+#define amd64_pop_membase(inst,basereg,disp) amd64_pop_membase_size(inst,basereg,disp,8)
+#define amd64_pushad(inst) amd64_pushad_size(inst,8)
+#define amd64_pushfd(inst) amd64_pushfd_size(inst,8)
+#define amd64_popad(inst) amd64_popad_size(inst,8)
+#define amd64_popfd(inst) amd64_popfd_size(inst,8)
+#define amd64_loop(inst,imm) amd64_loop_size(inst,imm,8)
+#define amd64_loope(inst,imm) amd64_loope_size(inst,imm,8)
+#define amd64_loopne(inst,imm) amd64_loopne_size(inst,imm,8)
+#define amd64_jump32(inst,imm) amd64_jump32_size(inst,imm,8)
+#define amd64_jump8(inst,imm) amd64_jump8_size(inst,imm,8)
+#define amd64_jump_reg(inst,reg) amd64_jump_reg_size(inst,reg,8)
+#define amd64_jump_mem(inst,mem) amd64_jump_mem_size(inst,mem,8)
+#define amd64_jump_membase(inst,basereg,disp) amd64_jump_membase_size(inst,basereg,disp,8)
+#define amd64_jump_code(inst,target) amd64_jump_code_size(inst,target,8)
+#define amd64_jump_disp(inst,disp) amd64_jump_disp_size(inst,disp,8)
+#define amd64_branch8(inst,cond,imm,is_signed) amd64_branch8_size(inst,cond,imm,is_signed,8)
+#define amd64_branch32(inst,cond,imm,is_signed) amd64_branch32_size(inst,cond,imm,is_signed,8)
+#define amd64_branch(inst,cond,target,is_signed) amd64_branch_size(inst,cond,target,is_signed,8)
+#define amd64_branch_disp(inst,cond,disp,is_signed) amd64_branch_disp_size(inst,cond,disp,is_signed,8)
+#define amd64_set_reg(inst,cond,reg,is_signed) amd64_set_reg_size(inst,cond,reg,is_signed,8)
+#define amd64_set_mem(inst,cond,mem,is_signed) amd64_set_mem_size(inst,cond,mem,is_signed,8)
+#define amd64_set_membase(inst,cond,basereg,disp,is_signed) amd64_set_membase_size(inst,cond,basereg,disp,is_signed,8)
+#define amd64_call_imm(inst,disp) amd64_call_imm_size(inst,disp,8)
+//#define amd64_call_reg(inst,reg) amd64_call_reg_size(inst,reg,8)
+#define amd64_call_mem(inst,mem) amd64_call_mem_size(inst,mem,8)
+#define amd64_call_membase(inst,basereg,disp) amd64_call_membase_size(inst,basereg,disp,8)
+#define amd64_call_code(inst,target) amd64_call_code_size(inst,target,8)
+//#define amd64_ret(inst) amd64_ret_size(inst,8)
+#define amd64_ret_imm(inst,imm) amd64_ret_imm_size(inst,imm,8)
+#define amd64_cmov_reg(inst,cond,is_signed,dreg,reg) amd64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8)
+#define amd64_cmov_mem(inst,cond,is_signed,reg,mem) amd64_cmov_mem_size(inst,cond,is_signed,reg,mem,8)
+#define amd64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) amd64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8)
+#define amd64_enter(inst,framesize) amd64_enter_size(inst,framesize)
+//#define amd64_leave(inst) amd64_leave_size(inst,8)
+#define amd64_sahf(inst) amd64_sahf_size(inst,8)
+#define amd64_fsin(inst) amd64_fsin_size(inst,8)
+#define amd64_fcos(inst) amd64_fcos_size(inst,8)
+#define amd64_fabs(inst) amd64_fabs_size(inst,8)
+#define amd64_ftst(inst) amd64_ftst_size(inst,8)
+#define amd64_fxam(inst) amd64_fxam_size(inst,8)
+#define amd64_fpatan(inst) amd64_fpatan_size(inst,8)
+#define amd64_fprem(inst) amd64_fprem_size(inst,8)
+#define amd64_fprem1(inst) amd64_fprem1_size(inst,8)
+#define amd64_frndint(inst) amd64_frndint_size(inst,8)
+#define amd64_fsqrt(inst) amd64_fsqrt_size(inst,8)
+#define amd64_fptan(inst) amd64_fptan_size(inst,8)
+#define amd64_padding(inst,size) amd64_padding_size(inst,size)
+#define amd64_prolog(inst,frame,reg_mask) amd64_prolog_size(inst,frame,reg_mask,8)
+#define amd64_epilog(inst,reg_mask) amd64_epilog_size(inst,reg_mask,8)
+
+#endif // AMD64_H
--- /dev/null
+
+#include "qmljs_objects.h"
+#include "qv4codegen_p.h"
+
+#include <QtCore>
+#include <private/qqmljsengine_p.h>
+#include <private/qqmljslexer_p.h>
+#include <private/qqmljsparser_p.h>
+#include <private/qqmljsast_p.h>
+
+#include <iostream>
+
+/*
+ * Driver: parse each file named on the command line with the QmlJS
+ * parser and run code generation over the resulting AST.
+ * Returns 0 on success, 1 when any file could not be read or parsed.
+ */
+int main(int argc, char *argv[])
+{
+    using namespace QQmlJS;
+
+    // Boehm GC must be initialized before any `new (GC)` allocation.
+    GC_INIT();
+
+    QCoreApplication app(argc, argv);
+    QStringList args = app.arguments();
+    args.removeFirst(); // drop the program name
+
+    int exitCode = 0;
+
+    Engine engine;
+    foreach (const QString &fn, args) {
+        QFile file(fn);
+        if (! file.open(QFile::ReadOnly)) {
+            // Fix: unreadable files were previously skipped silently.
+            std::cerr << qPrintable(fn) << ": error: "
+                      << qPrintable(file.errorString()) << std::endl;
+            exitCode = 1;
+            continue;
+        }
+
+        const QString code = QString::fromUtf8(file.readAll());
+        file.close();
+
+        Lexer lexer(&engine);
+        lexer.setCode(code, 1, false);
+        Parser parser(&engine);
+
+        const bool parsed = parser.parseProgram();
+
+        foreach (const DiagnosticMessage &m, parser.diagnosticMessages()) {
+            std::cerr << qPrintable(fn) << ':' << m.loc.startLine << ':' << m.loc.startColumn
+                      << ": error: " << qPrintable(m.message) << std::endl;
+        }
+
+        if (parsed) {
+            using namespace AST;
+            // NOTE(review): rootNode() may be null for an empty input;
+            // cast<> then yields 0 -- assumed Codegen tolerates that.
+            // TODO confirm.
+            Program *program = AST::cast<Program *>(parser.rootNode());
+
+            Codegen cg;
+            cg(program);
+        } else {
+            exitCode = 1;
+        }
+    }
+
+    return exitCode;
+}
--- /dev/null
+
+#include "qmljs_objects.h"
+#include <cassert>
+
+// [[Get]]: resolve `name` along the prototype chain. Stores the
+// property's value into *result and returns true on success;
+// otherwise stores undefined and returns false.
+bool Object::get(String *name, Value *result)
+{
+    Property *prop = getProperty(name);
+    if (prop) {
+        *result = prop->value;
+        return true;
+    }
+
+    __qmljs_init_undefined(0, result);
+    return false;
+}
+
+// Look up `name` among this object's own members only; the prototype
+// chain is deliberately not consulted. Returns 0 when absent.
+Property *Object::getOwnProperty(String *name)
+{
+    if (! members)
+        return 0;
+    return members->find(name);
+}
+
+// Resolve `name`: own property first, then recurse (virtually) into
+// the prototype chain. Returns 0 when the name is nowhere defined.
+Property *Object::getProperty(String *name)
+{
+    Property *prop = getOwnProperty(name);
+    if (! prop && prototype)
+        prop = prototype->getProperty(name);
+    return prop;
+}
+
+// Unconditional [[Put]]: lazily creates the member table and inserts
+// (or overwrites) the property.
+// NOTE(review): the `flag` (throw-on-failure) parameter is ignored and
+// canPut() is never consulted -- attribute enforcement is not wired up
+// yet; confirm this is intentional.
+void Object::put(String *name, const Value &value, bool flag)
+{
+    if (! members)
+        members = new (GC) Table();
+
+    members->insert(name, value);
+}
+
+// ES [[CanPut]] (approximation): an own property is always writable
+// here (put() does not enforce attributes either); otherwise an
+// inherited property decides via its writable flag, and with no
+// property anywhere the answer is this object's extensibility.
+// Fix: dropped the unreachable trailing `return true;` and the unused
+// `prop` binding in the first check.
+bool Object::canPut(String *name)
+{
+    if (getOwnProperty(name) != 0)
+        return true;
+
+    if (! prototype)
+        return extensible;
+
+    if (Property *inherited = prototype->getProperty(name))
+        return inherited->isWritable();
+
+    return extensible;
+}
+
+// Own-membership test.
+// NOTE(review): unlike getProperty(), this does NOT walk the prototype
+// chain, so __qmljs_in() only sees own properties -- ES [[HasProperty]]
+// consults the prototype; confirm whether this is intentional.
+bool Object::hasProperty(String *name) const
+{
+    if (members)
+        return members->find(name) != 0;
+
+    return false;
+}
+
+// [[Delete]]: remove an own property. Returns true when a property was
+// actually removed. The `flag` (throw-on-failure) parameter is not
+// implemented yet; configurability is not checked.
+bool Object::deleteProperty(String *name, bool flag)
+{
+    return members ? members->remove(name) : false;
+}
+
+// ES [[DefaultValue]] stub: only STRING_HINT is handled, yielding the
+// string "function" for callables and "object" otherwise; any other
+// hint produces undefined.
+void Object::defaultValue(Value *result, int typeHint)
+{
+    Context *ctx = 0; // ### no Context is threaded through here yet
+
+    if (typeHint == STRING_HINT) {
+        if (asFunctionObject() != 0)
+            __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("function")));
+        else
+            __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("object")));
+    } else {
+        __qmljs_init_undefined(ctx, result);
+    }
+}
+
+
+// Base [[HasInstance]]: plain function objects recognize no instances;
+// subclasses are expected to override this.
+bool FunctionObject::hasInstance(const Value &value) const
+{
+    Q_UNUSED(value);
+    return false;
+}
+
+Value FunctionObject::call(const Value &thisObject, const Value args[], unsigned argc)
+{
+ (void) thisObject;
+
+ Value v;
+ __qmljs_init_undefined(0, &v);
+ return v;
+}
+
+// Base [[Construct]]: allocate a fresh plain object, invoke the
+// function with it as `this`, and return the new object (the call's
+// own return value is discarded, as in the default ES semantics when
+// the constructor does not return an object).
+Value FunctionObject::construct(const Value args[], unsigned argc)
+{
+    Value newObject;
+    __qmljs_init_object(0, &newObject, new (GC) Object);
+    call(newObject, args, argc);
+    return newObject;
+}
--- /dev/null
+#ifndef QMLJS_OBJECTS_H
+#define QMLJS_OBJECTS_H
+
+#include "qmljs_runtime.h"
+
+#include <gc/gc_cpp.h>
+#include <QtCore/QString>
+#include <QtCore/QHash>
+
+struct Value;
+struct Object;
+struct BooleanObject;
+struct NumberObject;
+struct StringObject;
+struct ArrayObject;
+struct FunctionObject;
+struct ErrorObject;
+
+// Immutable, GC-managed string with a lazily computed, cached hash.
+struct String: gc_cleanup {
+    String(const QString &text)
+        : _text(text), _hashValue(0) {}
+
+    inline const QString &text() const {
+        return _text;
+    }
+
+    // Cached hash; 0 doubles as the "not yet computed" marker.
+    // NOTE(review): a string whose qHash() is genuinely 0 is rehashed
+    // on every call -- harmless, but worth knowing.
+    inline unsigned hashValue() const {
+        if (! _hashValue)
+            _hashValue = qHash(_text);
+
+        return _hashValue;
+    }
+
+    // Always allocates a fresh GC string: there is no interning, so
+    // pointer identity alone is NOT sufficient for equality tests.
+    static String *get(Context *ctx, const QString &s) {
+        return new (GC) String(s);
+    }
+
+private:
+    QString _text;
+    mutable unsigned _hashValue;  // lazy hash cache
+};
+
+// A single named property: name/value plus attribute flags, with an
+// intrusive `next` link used by Table's hash-bucket chains.
+struct Property: gc {
+    String *name;
+    Value value;
+    PropertyAttributes flags;
+    Property *next;  // next property in the same hash bucket
+
+    Property(String *name, const Value &value, PropertyAttributes flags = NoAttributes)
+        : name(name), value(value), flags(flags), next(0) {}
+
+    inline bool isWritable() const { return flags & WritableAttribute; }
+    inline bool isEnumerable() const { return flags & EnumerableAttribute; }
+    inline bool isConfigurable() const { return flags & ConfigurableAttribute; }
+
+    // Name comparison: cheap identity test first, then hash, then the
+    // full text compare (strings are not interned).
+    inline bool hasName(String *n) const {
+        if (name == n || (name->hashValue() == n->hashValue() && name->text() == n->text()))
+            return true;
+        return false;
+    }
+
+    inline unsigned hashValue() const {
+        return name->hashValue();
+    }
+};
+
+// Property table: a dense, insertion-ordered array of Property*
+// (exposed through begin()/end()) combined with a chained hash map for
+// lookup by name.
+class Table: public gc
+{
+    Property **_properties;  // dense array; _propertyCount + 1 entries
+    Property **_buckets;     // hash buckets chained through Property::next
+    int _propertyCount;      // index of last used slot; -1 when empty
+    int _bucketCount;
+    int _allocated;          // capacity of _properties
+
+public:
+    Table()
+        : _properties(0)
+        , _buckets(0)
+        , _propertyCount(-1)
+        , _bucketCount(11)
+        , _allocated(0) {}
+
+    bool empty() const { return _propertyCount == -1; }
+    unsigned size() const { return _propertyCount + 1; }
+
+    typedef Property **iterator;
+    iterator begin() const { return _properties; }
+    iterator end() const { return _properties + (_propertyCount + 1); }
+
+    // Remove the property named `name`; returns true when found.
+    // Fix: the entry is now also removed from the dense _properties
+    // array. Previously only the bucket chain was unlinked, so size()
+    // and the iterators kept reporting the property, and the next
+    // rehash() would resurrect it in the hash map.
+    bool remove(String *name)
+    {
+        if (! _properties)
+            return false;
+
+        const unsigned hash = name->hashValue() % _bucketCount;
+
+        // Unlink from the collision chain.
+        Property *removed = 0;
+        for (Property **link = &_buckets[hash]; *link; link = &(*link)->next) {
+            if ((*link)->hasName(name)) {
+                removed = *link;
+                *link = removed->next;
+                break;
+            }
+        }
+
+        if (! removed)
+            return false;
+
+        // Compact the dense array, preserving insertion order.
+        for (int i = 0; i <= _propertyCount; ++i) {
+            if (_properties[i] == removed) {
+                for (int j = i; j < _propertyCount; ++j)
+                    _properties[j] = _properties[j + 1];
+                --_propertyCount;
+                break;
+            }
+        }
+
+        return true;
+    }
+
+    // Find the property named `name`, or 0 when absent.
+    Property *find(String *name) const
+    {
+        if (! _properties)
+            return 0;
+
+        for (Property *prop = _buckets[name->hashValue() % _bucketCount]; prop; prop = prop->next) {
+            if (prop->hasName(name))
+                return prop;
+        }
+
+        return 0;
+    }
+
+    // Insert name -> value. Overwrites an existing property's value;
+    // otherwise appends a new property, growing the dense array
+    // (doubling, starting at 4) and rehashing when the load factor
+    // exceeds 2/3.
+    Property *insert(String *name, const Value &value)
+    {
+        if (Property *prop = find(name)) {
+            prop->value = value;
+            return prop;
+        }
+
+        if (++_propertyCount == _allocated) {
+            if (! _allocated)
+                _allocated = 4;
+            else
+                _allocated *= 2;
+
+            Property **properties = new (GC) Property*[_allocated];
+            std::copy(_properties, _properties + _propertyCount, properties);
+            _properties = properties;
+        }
+
+        Property *prop = new (GC) Property(name, value);
+        _properties[_propertyCount] = prop;
+
+        if (! _buckets || 3 * _propertyCount >= 2 * _bucketCount) {
+            rehash();
+        } else {
+            Property *&bucket = _buckets[prop->hashValue() % _bucketCount];
+            prop->next = bucket;
+            bucket = prop;
+        }
+
+        return prop;
+    }
+
+private:
+    // Double the bucket count and redistribute every live property
+    // from the dense array.
+    void rehash()
+    {
+        if (_bucketCount)
+            _bucketCount *= 2; // ### use the next prime instead
+
+        _buckets = new (GC) Property *[_bucketCount];
+        std::fill(_buckets, _buckets + _bucketCount, (Property *) 0);
+
+        for (int i = 0; i <= _propertyCount; ++i) {
+            Property *prop = _properties[i];
+            Property *&bucket = _buckets[prop->name->hashValue() % _bucketCount];
+            prop->next = bucket;
+            bucket = prop;
+        }
+    }
+};
+
+// Base JS object: prototype link, class name, lazily created member
+// table and the [[Extensible]] flag. All property operations are
+// virtual so exotic objects can override them.
+struct Object: gc_cleanup {
+    Object *prototype;   // [[Prototype]]; 0 terminates the chain
+    String *klass;       // [[Class]] name (currently always 0)
+    Table *members;      // own properties; created on first put()
+    bool extensible;     // [[Extensible]]
+
+    Object()
+        : prototype(0)
+        , klass(0)
+        , members(0)
+        , extensible(true) {}
+
+    virtual ~Object() {}
+
+    // Cheap type test; FunctionObject overrides to return itself.
+    virtual FunctionObject *asFunctionObject() { return 0; }
+
+    virtual bool get(String *name, Value *result);
+    virtual Property *getOwnProperty(String *name);
+    virtual Property *getProperty(String *name);
+    // Fix: default was the non-idiomatic `= 0` for a bool.
+    virtual void put(String *name, const Value &value, bool flag = false);
+    virtual bool canPut(String *name);
+    virtual bool hasProperty(String *name) const;
+    virtual bool deleteProperty(String *name, bool flag);
+    virtual void defaultValue(Value *result, int typeHint);
+    // ### TODO: defineOwnProperty(name, descriptor, boolean) -> boolean
+};
+
+// Primitive wrapper objects: each stores the wrapped primitive and
+// reports it as its default value (the type hint can be ignored since
+// the stored value is already primitive).
+struct BooleanObject: Object {
+    Value value;
+    BooleanObject(const Value &value): value(value) {}
+    virtual void defaultValue(Value *result, int typehint) { *result = value; }
+};
+
+struct NumberObject: Object {
+    Value value;
+    NumberObject(const Value &value): value(value) {}
+    virtual void defaultValue(Value *result, int typehint) { *result = value; }
+};
+
+struct StringObject: Object {
+    Value value;
+    StringObject(const Value &value): value(value) {}
+    virtual void defaultValue(Value *result, int typehint) { *result = value; }
+};
+
+// ### Array exotic behavior (length, index properties) not implemented.
+struct ArrayObject: Object {
+};
+
+// Callable object. The base implementation is inert: call() yields
+// undefined, hasInstance() answers false; subclasses override these.
+struct FunctionObject: Object {
+    Object *scope;                 // enclosing scope object; may be 0
+    String **formalParameterList;  // ### unused so far (always 0)
+
+    FunctionObject(Object *scope = 0): scope(scope), formalParameterList(0) {}
+    virtual FunctionObject *asFunctionObject() { return this; }
+
+    virtual bool hasInstance(const Value &value) const;
+    virtual Value call(const Value &thisObject, const Value args[], unsigned argc);
+    virtual Value construct(const Value args[], unsigned argc);
+};
+
+// Error objects only carry a message string for now; there is no
+// exception machinery yet (see __qmljs_throw_type_error).
+struct ErrorObject: Object {
+    String *message;
+    ErrorObject(String *message): message(message) {}
+};
+
+// ### `arguments` exotic behavior not implemented yet.
+struct ArgumentsObject: Object {
+};
+
+// Per-invocation execution context: activation object (variable
+// environment), `this` binding, head of the scope chain and the actual
+// argument values.
+struct Context: gc {
+    Value activation;
+    Value thisObject;
+    Object *scope;
+    Value *arguments;
+    unsigned argumentCount;
+
+    Context()
+        : scope(0)
+        , arguments(0)
+        , argumentCount(0)
+    {
+        // Value has no constructor of its own; tag the embedded values
+        // by hand so they do not start out with garbage type tags.
+        activation.type = NULL_TYPE;
+        thisObject.type = NULL_TYPE;
+    }
+};
+
+#endif // QMLJS_OBJECTS_H
--- /dev/null
+
+#include "qmljs_runtime.h"
+#include "qmljs_objects.h"
+#include <cstdio>
+#include <cassert>
+
+// Convenience overload: wrap a QString into a fresh GC-managed String
+// and forward to the String* overload.
+Value Value::string(Context *ctx, const QString &s)
+{
+    return string(ctx, String::get(ctx, s));
+}
+
+extern "C" {
+
+// Canned string literals used by typeof and ToString of the
+// primitives. Each allocates a fresh String (no interning yet).
+void __qmljs_string_literal_undefined(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("undefined")));
+}
+
+void __qmljs_string_literal_null(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("null")));
+}
+
+void __qmljs_string_literal_true(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("true")));
+}
+
+void __qmljs_string_literal_false(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("false")));
+}
+
+void __qmljs_string_literal_object(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("object")));
+}
+
+void __qmljs_string_literal_boolean(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("boolean")));
+}
+
+void __qmljs_string_literal_number(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("number")));
+}
+
+void __qmljs_string_literal_string(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("string")));
+}
+
+void __qmljs_string_literal_function(Context *ctx, Value *result)
+{
+    __qmljs_init_string(ctx, result, String::get(ctx, QLatin1String("function")));
+}
+
+// `delete` operator. ### Not implemented yet.
+// Fix: initialize *result instead of leaving it uninitialized for the
+// caller to read (generated code consumes *result unconditionally).
+void __qmljs_delete(Context *ctx, Value *result, const Value *value)
+{
+    (void) value;
+    __qmljs_init_undefined(ctx, result);
+}
+
+// `instanceof`: the right operand must be a callable object, whose
+// hasInstance() decides; anything else produces a TypeError value.
+void __qmljs_instanceof(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+    if (right->type == OBJECT_TYPE) {
+        if (FunctionObject *function = right->objectValue->asFunctionObject()) {
+            bool r = function->hasInstance(*left);
+            __qmljs_init_boolean(ctx, result, r);
+            return;
+        }
+    }
+
+    __qmljs_throw_type_error(ctx, result);
+}
+
+// `in` operator: the right operand must be an object; the left operand
+// is converted to a string and looked up via hasProperty().
+void __qmljs_in(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+    if (right->type != OBJECT_TYPE) {
+        __qmljs_throw_type_error(ctx, result);
+        return;
+    }
+
+    Value propertyName;
+    __qmljs_to_string(ctx, &propertyName, left);
+    const bool found = right->objectValue->hasProperty(propertyName.stringValue);
+    __qmljs_init_boolean(ctx, result, found);
+}
+
+// Length of a JS string, in UTF-16 code units (QString::length()).
+int __qmljs_string_length(Context *, String *string)
+{
+    return string->text().length();
+}
+
+// ES ToNumber(String), approximated: an empty or whitespace-only
+// string converts to 0; otherwise parse a decimal number, yielding
+// NaN on failure.
+// Fix: QString::toDouble() returns 0.0 with ok == false for invalid
+// input, so unparsable strings used to convert to 0 instead of NaN.
+// ### TODO: hex literals, Infinity, full StringNumericLiteral grammar.
+double __qmljs_string_to_number(Context *, String *string)
+{
+    const QString text = string->text().trimmed();
+    if (text.isEmpty())
+        return 0;
+
+    bool ok;
+    const double result = text.toDouble(&ok);
+    return ok ? result : NAN;
+}
+
+// Number -> string conversion.
+// NOTE(review): 'g'/16 precision does not match the exact ES
+// ToString(Number) grammar (shortest round-trip form) -- confirm.
+void __qmljs_string_from_number(Context *ctx, Value *result, double number)
+{
+    String *string = String::get(ctx, QString::number(number, 'g', 16));
+    __qmljs_init_string(ctx, result, string);
+}
+
+// Ordering: true when left sorts before right (QString's operator<,
+// i.e. UTF-16 code-unit comparison).
+bool __qmljs_string_compare(Context *, String *left, String *right)
+{
+    return left->text() < right->text();
+}
+
+// Equality: identity first, then hash, then the full text comparison
+// (strings are not interned, so identity alone is insufficient).
+bool __qmljs_string_equal(Context *, String *left, String *right)
+{
+    return left == right ||
+           (left->hashValue() == right->hashValue() &&
+            left->text() == right->text());
+}
+
+// Concatenation; always allocates a new String.
+String *__qmljs_string_concat(Context *ctx, String *first, String *second)
+{
+    return String::get(ctx, first->text() + second->text());
+}
+
+// True when `value` holds a callable object.
+// Fix: guard the type tag before reading objectValue -- for a
+// non-object Value the objectValue member holds garbage and
+// dereferencing it is undefined behavior.
+bool __qmljs_is_function(Context *, const Value *value)
+{
+    return value->type == OBJECT_TYPE
+            && value->objectValue->asFunctionObject() != 0;
+}
+
+// Dispatch [[DefaultValue]] to the object's virtual implementation.
+void __qmljs_object_default_value(Context *ctx, Value *result, Object *object, int typeHint)
+{
+    object->defaultValue(result, typeHint);
+}
+
+// ### Exceptions are not implemented: a "TypeError" is currently just
+// an ErrorObject stored into *result, not a thrown exception.
+void __qmljs_throw_type_error(Context *ctx, Value *result)
+{
+    __qmljs_init_object(ctx, result, new (GC) ErrorObject(String::get(ctx, QLatin1String("type error"))));
+}
+
+// Allocation of primitive wrapper objects (Boolean/Number/String):
+// box the primitive into a Value, then wrap it in the matching
+// GC-managed wrapper object.
+void __qmljs_new_boolean_object(Context *ctx, Value *result, bool boolean)
+{
+    Value value;
+    __qmljs_init_boolean(ctx, &value, boolean);
+    __qmljs_init_object(ctx, result, new (GC) BooleanObject(value));
+}
+
+void __qmljs_new_number_object(Context *ctx, Value *result, double number)
+{
+    Value value;
+    __qmljs_init_number(ctx, &value, number);
+    __qmljs_init_object(ctx, result, new (GC) NumberObject(value));
+}
+
+void __qmljs_new_string_object(Context *ctx, Value *result, String *string)
+{
+    Value value;
+    __qmljs_init_string(ctx, &value, string);
+    __qmljs_init_object(ctx, result, new (GC) StringObject(value));
+}
+
+// Property stores. All variants funnel into Object::put(); the
+// number/string overloads exist so generated code can store without
+// building a Value first. The activation variants target the current
+// context's activation object.
+// NOTE(review): object->type is not checked here (unlike
+// __qmljs_get_property's assert) -- assumed callers guarantee an
+// object; confirm.
+void __qmljs_set_property(Context *ctx, Value *object, String *name, Value *value)
+{
+    object->objectValue->put(name, *value, /*flags*/ 0);
+}
+
+void __qmljs_set_property_number(Context *ctx, Value *object, String *name, double number)
+{
+    Value value;
+    __qmljs_init_number(ctx, &value, number);
+    object->objectValue->put(name, value, /*flag*/ 0);
+}
+
+void __qmljs_set_property_string(Context *ctx, Value *object, String *name, String *s)
+{
+    Value value;
+    __qmljs_init_string(ctx, &value, s);
+    object->objectValue->put(name, value, /*flag*/ 0);
+}
+
+void __qmljs_set_activation_property(Context *ctx, String *name, Value *value)
+{
+    __qmljs_set_property(ctx, &ctx->activation, name, value);
+}
+
+void __qmljs_set_activation_property_number(Context *ctx, String *name, double value)
+{
+    __qmljs_set_property_number(ctx, &ctx->activation, name, value);
+}
+
+void __qmljs_set_activation_property_string(Context *ctx, String *name, String *value)
+{
+    __qmljs_set_property_string(ctx, &ctx->activation, name, value);
+}
+
+// Property loads and context accessors.
+void __qmljs_get_property(Context *ctx, Value *result, Value *object, String *name)
+{
+    Q_ASSERT(object->type == OBJECT_TYPE);
+    object->objectValue->get(name, result);
+}
+
+// Read a name from the current activation (variable environment).
+void __qmljs_get_activation_property(Context *ctx, Value *result, String *name)
+{
+    __qmljs_get_property(ctx, result, &ctx->activation, name);
+}
+
+void __qmljs_get_activation(Context *ctx, Value *result)
+{
+    *result = ctx->activation;
+}
+
+void __qmljs_get_thisObject(Context *ctx, Value *result)
+{
+    *result = ctx->thisObject;
+}
+
+// ES5 11.8.5 Abstract Relational Comparison (px < py). `leftFirst`
+// controls the order of the two ToPrimitive conversions (their side
+// effects are observable). Two strings compare lexicographically; any
+// NaN operand yields undefined; otherwise the result is a boolean.
+// The infinity/sign cascade below deliberately mirrors the spec's
+// step-by-step algorithm, so its statement order is significant.
+void __qmljs_compare(Context *ctx, Value *result, const Value *x, const Value *y, bool leftFirst)
+{
+    Value px, py;
+
+    if (leftFirst) {
+        __qmljs_to_primitive(ctx, &px, x, NUMBER_HINT);
+        __qmljs_to_primitive(ctx, &py, y, NUMBER_HINT);
+    } else {
+        // Right operand first; note px still ends up holding x's
+        // primitive and py y's.
+        __qmljs_to_primitive(ctx, &py, x, NUMBER_HINT);
+        __qmljs_to_primitive(ctx, &px, y, NUMBER_HINT);
+    }
+
+    if (px.type == STRING_TYPE && py.type == STRING_TYPE) {
+        bool r = __qmljs_string_compare(ctx, px.stringValue, py.stringValue);
+        __qmljs_init_boolean(ctx, result, r);
+    } else {
+        double nx = __qmljs_to_number(ctx, &px);
+        double ny = __qmljs_to_number(ctx, &py);
+        if (isnan(nx) || isnan(ny)) {
+            // Spec: comparison with NaN is "undefined", not false.
+            __qmljs_init_undefined(ctx, result);
+        } else {
+            const int sx = signbit(nx);
+            const int sy = signbit(ny);
+            const bool ix = isinf(nx);
+            const bool iy = isinf(ny);
+            bool r = false;
+
+            if (ix && !sx) {            // +inf < anything  -> false
+                r = false;
+            } else if (iy && !sy) {     // finite/-inf < +inf -> true
+                r = true;
+            } else if (iy && sy) {      // anything < -inf   -> false
+                r = false;
+            } else if (ix && sx) {      // -inf < finite     -> true
+                r = true;
+            } else if (nx == ny) {
+                r = false;
+            } else {
+                r = nx < ny;
+            }
+            __qmljs_init_boolean(ctx, result, r);
+        }
+    }
+}
+
+// ES5 11.9.3 Abstract Equality Comparison (==). Same-type operands
+// compare directly; mixed types are coerced (boolean -> number,
+// string <-> number, object -> primitive) and the comparison recurses.
+bool __qmljs_equal(Context *ctx, const Value *x, const Value *y)
+{
+    if (x->type == y->type) {
+        switch ((ValueType) x->type) {
+        case UNDEFINED_TYPE:
+            return true;
+        case NULL_TYPE:
+            return true;
+        case BOOLEAN_TYPE:
+            return x->booleanValue == y->booleanValue;
+            break;
+        case NUMBER_TYPE:
+            // NaN != NaN and -0 == +0 fall out of the C == operator.
+            return x->numberValue == y->numberValue;
+        case STRING_TYPE:
+            return __qmljs_string_equal(ctx, x->stringValue, y->stringValue);
+        case OBJECT_TYPE:
+            // Objects compare by identity.
+            return x->objectValue == y->objectValue;
+        }
+        // unreachable
+    } else {
+        // null == undefined (both directions).
+        if (x->type == NULL_TYPE && y->type == UNDEFINED_TYPE) {
+            return true;
+        } else if (x->type == UNDEFINED_TYPE && y->type == NULL_TYPE) {
+            return true;
+        // string <-> number: convert the string and recurse.
+        } else if (x->type == NUMBER_TYPE && y->type == STRING_TYPE) {
+            Value ny;
+            __qmljs_init_number(ctx, &ny, __qmljs_to_number(ctx, y));
+            return __qmljs_equal(ctx, x, &ny);
+        } else if (x->type == STRING_TYPE && y->type == NUMBER_TYPE) {
+            Value nx;
+            __qmljs_init_number(ctx, &nx, __qmljs_to_number(ctx, x));
+            return __qmljs_equal(ctx, &nx, y);
+        // booleans are first converted to numbers.
+        } else if (x->type == BOOLEAN_TYPE) {
+            Value nx;
+            __qmljs_init_number(ctx, &nx, (double) x->booleanValue);
+            return __qmljs_equal(ctx, &nx, y);
+        } else if (y->type == BOOLEAN_TYPE) {
+            Value ny;
+            __qmljs_init_number(ctx, &ny, (double) y->booleanValue);
+            return __qmljs_equal(ctx, x, &ny);
+        // primitive <-> object: take the object's default value.
+        } else if ((x->type == NUMBER_TYPE || x->type == STRING_TYPE) && y->type == OBJECT_TYPE) {
+            Value py;
+            __qmljs_to_primitive(ctx, &py, y, PREFERREDTYPE_HINT);
+            return __qmljs_equal(ctx, x, &py);
+        } else if (x->type == OBJECT_TYPE && (y->type == NUMBER_TYPE || y->type == STRING_TYPE)) {
+            Value px;
+            __qmljs_to_primitive(ctx, &px, x, PREFERREDTYPE_HINT);
+            return __qmljs_equal(ctx, &px, y);
+        }
+    }
+
+    return false;
+}
+
+// ES [[Call]] entry point: only a callable object may be invoked;
+// anything else stores a TypeError value into *result.
+void __qmljs_call(Context *ctx, Value *result, const Value *function,
+                  const Value *thisObject, const Value *arguments, int argc)
+{
+    FunctionObject *callable = 0;
+    if (function->type == OBJECT_TYPE)
+        callable = function->objectValue->asFunctionObject();
+
+    if (! callable) {
+        __qmljs_throw_type_error(ctx, result);
+        return;
+    }
+
+    *result = callable->call(*thisObject, arguments, argc);
+}
+
+// ES [[Construct]] entry point (the `new` operator): only a callable
+// object may be constructed; anything else yields a TypeError value.
+// NOTE(review): the symbol is spelled __qmjs_construct, missing an
+// 'l' vs. the __qmljs_* prefix used everywhere else. Generated code
+// presumably references this exact spelling, so it is kept -- confirm
+// against the code generator before renaming.
+void __qmjs_construct(Context *ctx, Value *result, const Value *function, const Value *arguments, int argc)
+{
+    if (function->type != OBJECT_TYPE) {
+        __qmljs_throw_type_error(ctx, result);
+    } else if (FunctionObject *f = function->objectValue->asFunctionObject()) {
+        *result = f->construct(arguments, argc);
+    } else {
+        __qmljs_throw_type_error(ctx, result);
+    }
+}
+
+
+} // extern "C"
--- /dev/null
+#ifndef QMLJS_RUNTIME_H
+#define QMLJS_RUNTIME_H
+
+#include <QtCore/QString>
+#include <QtCore/QDebug>
+#include <math.h>
+
+// Runtime type tag stored in Value::type.
+enum ValueType {
+    UNDEFINED_TYPE,
+    NULL_TYPE,
+    BOOLEAN_TYPE,
+    NUMBER_TYPE,
+    STRING_TYPE,
+    OBJECT_TYPE
+};
+
+// Hint passed to ToPrimitive / [[DefaultValue]].
+enum TypeHint {
+    PREFERREDTYPE_HINT,
+    NUMBER_HINT,
+    STRING_HINT
+};
+
+// Property descriptor attribute bits; combined as a bit mask in
+// Property::flags.
+enum PropertyAttributes {
+    NoAttributes = 0,
+    ValueAttribute = 1,
+    WritableAttribute = 2,
+    EnumerableAttribute = 4,
+    ConfigurableAttribute = 8
+};
+
+struct Value;
+struct Object;
+struct String;
+struct Context;
+
+extern "C" {
+
+// constructors
+void __qmljs_init_undefined(Context *ctx, Value *result);
+void __qmljs_init_null(Context *ctx, Value *result);
+void __qmljs_init_boolean(Context *ctx, Value *result, bool value);
+void __qmljs_init_number(Context *ctx, Value *result, double number);
+void __qmljs_init_string(Context *ctx, Value *result, String *string);
+void __qmljs_init_object(Context *ctx, Value *result, Object *object);
+
+bool __qmljs_is_function(Context *ctx, const Value *value);
+
+// string literals
+void __qmljs_string_literal_undefined(Context *ctx, Value *result);
+void __qmljs_string_literal_null(Context *ctx, Value *result);
+void __qmljs_string_literal_true(Context *ctx, Value *result);
+void __qmljs_string_literal_false(Context *ctx, Value *result);
+void __qmljs_string_literal_object(Context *ctx, Value *result);
+void __qmljs_string_literal_boolean(Context *ctx, Value *result);
+void __qmljs_string_literal_number(Context *ctx, Value *result);
+void __qmljs_string_literal_string(Context *ctx, Value *result);
+void __qmljs_string_literal_function(Context *ctx, Value *result);
+
+// strings
+int __qmljs_string_length(Context *ctx, String *string);
+double __qmljs_string_to_number(Context *ctx, String *string);
+void __qmljs_string_from_number(Context *ctx, Value *result, double number);
+bool __qmljs_string_compare(Context *ctx, String *left, String *right);
+bool __qmljs_string_equal(Context *ctx, String *left, String *right);
+String *__qmljs_string_concat(Context *ctx, String *first, String *second);
+
+// objects
+void __qmljs_object_default_value(Context *ctx, Value *result, Object *object, int typeHint);
+void __qmljs_throw_type_error(Context *ctx, Value *result);
+void __qmljs_new_boolean_object(Context *ctx, Value *result, bool boolean);
+void __qmljs_new_number_object(Context *ctx, Value *result, double n);
+void __qmljs_new_string_object(Context *ctx, Value *result, String *string);
+void __qmljs_set_property(Context *ctx, Value *object, String *name, Value *value);
+void __qmljs_set_property_number(Context *ctx, Value *object, String *name, double value);
+void __qmljs_set_property_string(Context *ctx, Value *object, String *name, String *value);
+void __qmljs_set_activation_property(Context *ctx, String *name, Value *value);
+void __qmljs_set_activation_property_number(Context *ctx, String *name, double value);
+void __qmljs_set_activation_property_string(Context *ctx, String *name, String *value);
+void __qmljs_get_property(Context *ctx, Value *result, Value *object, String *name);
+void __qmljs_get_activation_property(Context *ctx, Value *result, String *name);
+
+// context
+void __qmljs_get_activation(Context *ctx, Value *result);
+void __qmljs_get_thisObject(Context *ctx, Value *result);
+
+// type conversion and testing
+void __qmljs_to_primitive(Context *ctx, Value *result, const Value *value, int typeHint);
+bool __qmljs_to_boolean(Context *ctx, const Value *value);
+double __qmljs_to_number(Context *ctx, const Value *value);
+double __qmljs_to_integer(Context *ctx, const Value *value);
+int __qmljs_to_int32(Context *ctx, const Value *value);
+unsigned __qmljs_to_uint32(Context *ctx, const Value *value);
+unsigned short __qmljs_to_uint16(Context *ctx, const Value *value);
+void __qmljs_to_string(Context *ctx, Value *result, const Value *value);
+void __qmljs_to_object(Context *ctx, Value *result, const Value *value);
+bool __qmljs_check_object_coercible(Context *ctx, Value *result, const Value *value);
+bool __qmljs_is_callable(Context *ctx, const Value *value);
+void __qmljs_default_value(Context *ctx, Value *result, const Value *value, int typeHint);
+
+void __qmljs_compare(Context *ctx, Value *result, const Value *left, const Value *right, bool leftFlag);
+bool __qmljs_equal(Context *ctx, const Value *x, const Value *y);
+bool __qmljs_strict_equal(Context *ctx, const Value *x, const Value *y);
+
+// unary operators
+void __qmljs_delete(Context *ctx, Value *result, const Value *value);
+void __qmljs_typeof(Context *ctx, Value *result, const Value *value);
+void __qmljs_postincr(Context *ctx, Value *result, const Value *value);
+void __qmljs_postdecr(Context *ctx, Value *result, const Value *value);
+void __qmljs_uplus(Context *ctx, Value *result, const Value *value);
+void __qmljs_uminus(Context *ctx, Value *result, const Value *value);
+void __qmljs_compl(Context *ctx, Value *result, const Value *value);
+void __qmljs_not(Context *ctx, Value *result, const Value *value);
+void __qmljs_preincr(Context *ctx, Value *result, const Value *value);
+void __qmljs_predecr(Context *ctx, Value *result, const Value *value);
+
+// binary operators
+void __qmljs_instanceof(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_in(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_bit_or(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_bit_xor(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_bit_and(Context *ctx, Value *result, const Value *left,const Value *right);
+void __qmljs_add(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_sub(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_mul(Context *ctx, Value *result, const Value *left,const Value *right);
+void __qmljs_div(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_mod(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_shl(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_shr(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_ushr(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_gt(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_lt(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_ge(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_le(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_eq(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_ne(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_se(Context *ctx, Value *result, const Value *left, const Value *right);
+void __qmljs_sne(Context *ctx, Value *result, const Value *left, const Value *right);
+
+void __qmljs_call(Context *ctx, Value *result, const Value *function, const Value *thisObject, const Value *arguments, int argc);
+void __qmjs_construct(Context *ctx, Value *result, const Value *function, const Value *arguments, int argc);
+
+} // extern "C"
+
// A tagged union representing one ECMAScript value: 'type' holds a ValueType
// constant and selects which union member is meaningful.
struct Value {
    int type;
    union {
        bool booleanValue;
        double numberValue;
        Object *objectValue;
        String *stringValue;
    };

    // Factory: wrap a C++ bool into a BOOLEAN_TYPE Value.
    static inline Value boolean(Context *ctx, bool value) {
        Value v;
        __qmljs_init_boolean(ctx, &v, value);
        return v;
    }

    // Factory: wrap a double into a NUMBER_TYPE Value.
    static inline Value number(Context *ctx, double value) {
        Value v;
        __qmljs_init_number(ctx, &v, value);
        return v;
    }

    // Factory: wrap an Object pointer into an OBJECT_TYPE Value.
    static inline Value object(Context *ctx, Object *value) {
        Value v;
        __qmljs_init_object(ctx, &v, value);
        return v;
    }

    // Factory: wrap a String pointer into a STRING_TYPE Value.
    static inline Value string(Context *ctx, String *value) {
        Value v;
        __qmljs_init_string(ctx, &v, value);
        return v;
    }

    // Allocates a runtime String from a QString; defined out of line.
    static Value string(Context *ctx, const QString &string);
};
+
+extern "C" {
+
// constructors
// Each initializer stores the type tag (plus payload, where there is one)
// into *result.  The Context argument is unused here; it is kept so every
// runtime entry point shares the same calling convention.
inline void __qmljs_init_undefined(Context *, Value *result)
{
    result->type = UNDEFINED_TYPE;
}

inline void __qmljs_init_null(Context *, Value *result)
{
    result->type = NULL_TYPE;
}

inline void __qmljs_init_boolean(Context *, Value *result, bool value)
{
    result->type = BOOLEAN_TYPE;
    result->booleanValue = value;
}

inline void __qmljs_init_number(Context *, Value *result, double value)
{
    result->type = NUMBER_TYPE;
    result->numberValue = value;
}

inline void __qmljs_init_string(Context *, Value *result, String *value)
{
    result->type = STRING_TYPE;
    result->stringValue = value;
}

inline void __qmljs_init_object(Context *, Value *result, Object *object)
{
    result->type = OBJECT_TYPE;
    result->objectValue = object;
}
+
+
+// type conversion and testing
// ECMA-262 9.1 ToPrimitive: primitive values pass through unchanged;
// objects are converted via their [[DefaultValue]] with the given hint.
inline void __qmljs_to_primitive(Context *ctx, Value *result, const Value *value, int typeHint)
{
    switch ((ValueType) value->type) {
    case UNDEFINED_TYPE:
    case NULL_TYPE:
    case BOOLEAN_TYPE:
    case NUMBER_TYPE:
    case STRING_TYPE:
        *result = *value;
        break;
    case OBJECT_TYPE:
        __qmljs_default_value(ctx, result, value, typeHint);
        break;
    }
}
+
// ECMA-262 9.2 ToBoolean: undefined/null are false; numbers are false for
// +0, -0 and NaN; strings are false when empty; objects are always true.
inline bool __qmljs_to_boolean(Context *ctx, const Value *value)
{
    switch ((ValueType) value->type) {
    case UNDEFINED_TYPE:
    case NULL_TYPE:
        return false;
    case BOOLEAN_TYPE:
        return value->booleanValue;
    case NUMBER_TYPE:
        if (! value->numberValue || isnan(value->numberValue))
            return false;
        return true;
    case STRING_TYPE:
        return __qmljs_string_length(ctx, value->stringValue) > 0;
    case OBJECT_TYPE:
        return true;
    }
    Q_ASSERT(!"unreachable");
    return false;
}
+
// ECMA-262 9.3 ToNumber: undefined -> NaN, null -> +0, booleans -> 0/1,
// strings are parsed, and objects go through ToPrimitive(hint Number) first.
inline double __qmljs_to_number(Context *ctx, const Value *value)
{
    switch ((ValueType) value->type) {
    case UNDEFINED_TYPE:
        return nan("");
    case NULL_TYPE:
        return 0;
    case BOOLEAN_TYPE:
        return (double) value->booleanValue;
    case NUMBER_TYPE:
        return value->numberValue;
    case STRING_TYPE:
        return __qmljs_string_to_number(ctx, value->stringValue);
    case OBJECT_TYPE: {
        // Recurse on the primitive produced by [[DefaultValue]].
        Value prim;
        __qmljs_to_primitive(ctx, &prim, value, NUMBER_HINT);
        return __qmljs_to_number(ctx, &prim);
    }
    } // switch
    return 0; // unreachable
}
+
+inline double __qmljs_to_integer(Context *ctx, const Value *value)
+{
+ const double number = __qmljs_to_number(ctx, value);
+ if (isnan(number))
+ return +0;
+ else if (! number || isinf(number))
+ return number;
+ return signbit(number) * floor(fabs(number));
+}
+
+inline int __qmljs_to_int32(Context *ctx, const Value *value)
+{
+ const double number = __qmljs_to_number(ctx, value);
+ if (! number || isnan(number) || isinf(number))
+ return +0;
+ return (int) trunc(number); // ###
+}
+
+inline unsigned __qmljs_to_uint32(Context *ctx, const Value *value)
+{
+ const double number = __qmljs_to_number(ctx, value);
+ if (! number || isnan(number) || isinf(number))
+ return +0;
+ return (unsigned) trunc(number); // ###
+}
+
+inline unsigned short __qmljs_to_uint16(Context *ctx, const Value *value)
+{
+ const double number = __qmljs_to_number(ctx, value);
+ if (! number || isnan(number) || isinf(number))
+ return +0;
+ return (unsigned short) trunc(number); // ###
+}
+
// ECMA-262 9.8 ToString: fixed literals for undefined/null/booleans, number
// formatting for numbers, identity for strings, and ToPrimitive(hint String)
// followed by a recursive ToString for objects.
inline void __qmljs_to_string(Context *ctx, Value *result, const Value *value)
{
    switch ((ValueType) value->type) {
    case UNDEFINED_TYPE:
        __qmljs_string_literal_undefined(ctx, result);
        break;
    case NULL_TYPE:
        __qmljs_string_literal_null(ctx, result);
        break;
    case BOOLEAN_TYPE:
        if (value->booleanValue)
            __qmljs_string_literal_true(ctx, result);
        else
            __qmljs_string_literal_false(ctx, result);
        break;
    case NUMBER_TYPE:
        __qmljs_string_from_number(ctx, result, value->numberValue);
        break;
    case STRING_TYPE:
        *result = *value;
        break;
    case OBJECT_TYPE: {
        Value prim;
        __qmljs_to_primitive(ctx, &prim, value, STRING_HINT);
        __qmljs_to_string(ctx, result, &prim);
        break;
    }

    } // switch
}
+
// ECMA-262 9.9 ToObject: undefined and null raise a TypeError; primitives
// are boxed into wrapper objects; objects pass through unchanged.
inline void __qmljs_to_object(Context *ctx, Value *result, const Value *value)
{
    switch ((ValueType) value->type) {
    case UNDEFINED_TYPE:
    case NULL_TYPE:
        __qmljs_throw_type_error(ctx, result);
        break;
    case BOOLEAN_TYPE:
        __qmljs_new_boolean_object(ctx, result, value->booleanValue);
        break;
    case NUMBER_TYPE:
        __qmljs_new_number_object(ctx, result, value->numberValue);
        break;
    case STRING_TYPE:
        __qmljs_new_string_object(ctx, result, value->stringValue);
        break;
    case OBJECT_TYPE:
        *result = *value;
        break;
    }
}
+
+inline bool __qmljs_check_object_coercible(Context *ctx, Value *result, const Value *value)
+{
+ switch ((ValueType) value->type) {
+ case UNDEFINED_TYPE:
+ case NULL_TYPE:
+ __qmljs_throw_type_error(ctx, result);
+ return false;
+ case BOOLEAN_TYPE:
+ case NUMBER_TYPE:
+ case STRING_TYPE:
+ case OBJECT_TYPE:
+ return true;
+ }
+}
+
+inline bool __qmljs_is_callable(Context *ctx, const Value *value)
+{
+ if (value->type == OBJECT_TYPE)
+ return __qmljs_is_function(ctx, value);
+ else
+ return false;
+}
+
+inline void __qmljs_default_value(Context *ctx, Value *result, const Value *value, int typeHint)
+{
+ if (value->type == OBJECT_TYPE)
+ __qmljs_object_default_value(ctx, result, value->objectValue, typeHint);
+ else
+ __qmljs_init_undefined(ctx, result);
+}
+
+
+// unary operators
// ECMA-262 11.4.3 'typeof': produces the type-name string literal.  Note
// that typeof null is "object", and callable objects report "function".
inline void __qmljs_typeof(Context *ctx, Value *result, const Value *value)
{
    switch ((ValueType) value->type) {
    case UNDEFINED_TYPE:
        __qmljs_string_literal_undefined(ctx, result);
        break;
    case NULL_TYPE:
        __qmljs_string_literal_object(ctx, result);
        break;
    case BOOLEAN_TYPE:
        __qmljs_string_literal_boolean(ctx, result);
        break;
    case NUMBER_TYPE:
        __qmljs_string_literal_number(ctx, result);
        break;
    case STRING_TYPE:
        __qmljs_string_literal_string(ctx, result);
        break;
    case OBJECT_TYPE:
        if (__qmljs_is_callable(ctx, value))
            __qmljs_string_literal_function(ctx, result);
        else
            __qmljs_string_literal_object(ctx, result); // ### implementation-defined
        break;
    }
}
+
+inline void __qmljs_uplus(Context *ctx, Value *result, const Value *value)
+{
+ double n = __qmljs_to_number(ctx, value);
+ __qmljs_init_number(ctx, result, n);
+}
+
+inline void __qmljs_uminus(Context *ctx, Value *result, const Value *value)
+{
+ double n = __qmljs_to_number(ctx, value);
+ __qmljs_init_number(ctx, result, -n);
+}
+
+inline void __qmljs_compl(Context *ctx, Value *result, const Value *value)
+{
+ int n = __qmljs_to_int32(ctx, value);
+ __qmljs_init_number(ctx, result, ~n);
+}
+
+inline void __qmljs_not(Context *ctx, Value *result, const Value *value)
+{
+ bool b = __qmljs_to_boolean(ctx, value);
+ __qmljs_init_number(ctx, result, !b);
+}
+
+// binary operators
+inline void __qmljs_bit_or(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ int lval = __qmljs_to_int32(ctx, left);
+ int rval = __qmljs_to_int32(ctx, right);
+ __qmljs_init_number(ctx, result, lval | rval);
+}
+
+inline void __qmljs_bit_xor(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ int lval = __qmljs_to_int32(ctx, left);
+ int rval = __qmljs_to_int32(ctx, right);
+ __qmljs_init_number(ctx, result, lval ^ rval);
+}
+
+inline void __qmljs_bit_and(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ int lval = __qmljs_to_int32(ctx, left);
+ int rval = __qmljs_to_int32(ctx, right);
+ __qmljs_init_number(ctx, result, lval & rval);
+}
+
// ECMA-262 11.6.1 '+': both operands are converted to primitives first; if
// either primitive is a string the operation is string concatenation,
// otherwise numeric addition.
inline void __qmljs_add(Context *ctx, Value *result, const Value *left, const Value *right)
{
    Value pleft, pright;
    __qmljs_to_primitive(ctx, &pleft, left, PREFERREDTYPE_HINT);
    __qmljs_to_primitive(ctx, &pright, right, PREFERREDTYPE_HINT);
    if (pleft.type == STRING_TYPE || pright.type == STRING_TYPE) {
        if (pleft.type != STRING_TYPE)
            __qmljs_to_string(ctx, &pleft, &pleft);
        if (pright.type != STRING_TYPE)
            __qmljs_to_string(ctx, &pright, &pright);
        String *string = __qmljs_string_concat(ctx, pleft.stringValue, pright.stringValue);
        __qmljs_init_string(ctx, result, string);
    } else {
        double x = __qmljs_to_number(ctx, &pleft);
        double y = __qmljs_to_number(ctx, &pright);
        __qmljs_init_number(ctx, result, x + y);
    }
}
+
+inline void __qmljs_sub(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ double lval = __qmljs_to_number(ctx, left);
+ double rval = __qmljs_to_number(ctx, right);
+ __qmljs_init_number(ctx, result, lval - rval);
+}
+
+inline void __qmljs_mul(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ double lval = __qmljs_to_number(ctx, left);
+ double rval = __qmljs_to_number(ctx, right);
+ __qmljs_init_number(ctx, result, lval * rval);
+}
+
+inline void __qmljs_div(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ double lval = __qmljs_to_number(ctx, left);
+ double rval = __qmljs_to_number(ctx, right);
+ __qmljs_init_number(ctx, result, lval * rval);
+}
+
+inline void __qmljs_mod(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ double lval = __qmljs_to_number(ctx, left);
+ double rval = __qmljs_to_number(ctx, right);
+ __qmljs_init_number(ctx, result, fmod(lval, rval));
+}
+
+inline void __qmljs_shl(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ int lval = __qmljs_to_int32(ctx, left);
+ unsigned rval = __qmljs_to_uint32(ctx, right);
+ __qmljs_init_number(ctx, result, lval << rval);
+}
+
+inline void __qmljs_shr(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ int lval = __qmljs_to_int32(ctx, left);
+ unsigned rval = __qmljs_to_uint32(ctx, right);
+ __qmljs_init_number(ctx, result, lval >> rval);
+}
+
+inline void __qmljs_ushr(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ unsigned lval = __qmljs_to_uint32(ctx, left);
+ unsigned rval = __qmljs_to_uint32(ctx, right);
+ __qmljs_init_number(ctx, result, lval << rval);
+}
+
// '>' (ECMA-262 11.8.2): abstract relational comparison with the operands
// swapped; an undefined comparison result (NaN involved) yields false.
// NOTE(review): the exact meaning of __qmljs_compare's leftFlag is defined
// elsewhere — verify the argument order against its implementation.
inline void __qmljs_gt(Context *ctx, Value *result, const Value *left, const Value *right)
{
    __qmljs_compare(ctx, result, right, left, false);

    if (result->type == UNDEFINED_TYPE)
        __qmljs_init_boolean(ctx, result, false);
}
+
// '<' (ECMA-262 11.8.1): abstract relational comparison; an undefined
// comparison result (NaN involved) yields false.
inline void __qmljs_lt(Context *ctx, Value *result, const Value *left, const Value *right)
{
    __qmljs_compare(ctx, result, left, right, true);

    if (result->type == UNDEFINED_TYPE)
        __qmljs_init_boolean(ctx, result,false);
}
+
// '>=' (ECMA-262 11.8.4): true unless the swapped relational comparison
// produced undefined (NaN) or true — i.e. !(left < right) in spec terms.
inline void __qmljs_ge(Context *ctx, Value *result, const Value *left, const Value *right)
{
    __qmljs_compare(ctx, result, right, left, false);

    bool r = ! (result->type == UNDEFINED_TYPE ||
                (result->type == BOOLEAN_TYPE && result->booleanValue == true));

    __qmljs_init_boolean(ctx, result, r);
}
+
// '<=' (ECMA-262 11.8.3): true unless the relational comparison produced
// undefined (NaN) or true — i.e. !(right < left) in spec terms.
inline void __qmljs_le(Context *ctx, Value *result, const Value *left, const Value *right)
{
    __qmljs_compare(ctx, result, left, right, true);

    bool r = ! (result->type == UNDEFINED_TYPE ||
                (result->type == BOOLEAN_TYPE && result->booleanValue == true));

    __qmljs_init_boolean(ctx, result, r);
}
+
+inline void __qmljs_eq(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ bool r = __qmljs_equal(ctx, left, right);
+ __qmljs_init_boolean(ctx, result, r);
+}
+
+inline void __qmljs_ne(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ bool r = ! __qmljs_equal(ctx, left, right);
+ __qmljs_init_boolean(ctx, result, r);
+}
+
+inline void __qmljs_se(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ bool r = __qmljs_strict_equal(ctx, left, right);
+ __qmljs_init_boolean(ctx, result, r);
+}
+
+inline void __qmljs_sne(Context *ctx, Value *result, const Value *left, const Value *right)
+{
+ bool r = ! __qmljs_strict_equal(ctx, left, right);
+ __qmljs_init_boolean(ctx, result, r);
+}
+
// ECMA-262 11.9.6 strict equality: values of different types are never
// equal; otherwise compare payloads (strings by content, objects by
// identity).  NaN != NaN falls out of the C++ '==' on doubles.
inline bool __qmljs_strict_equal(Context *ctx, const Value *x, const Value *y)
{
    if (x->type != y->type)
        return false;

    switch ((ValueType) x->type) {
    case UNDEFINED_TYPE:
    case NULL_TYPE:
        return true;
    case BOOLEAN_TYPE:
        return x->booleanValue == y->booleanValue;
    case NUMBER_TYPE:
        return x->numberValue == y->numberValue;
    case STRING_TYPE:
        return __qmljs_string_equal(ctx, x->stringValue, y->stringValue);
    case OBJECT_TYPE:
        return x->objectValue == y->objectValue;
    }
    Q_ASSERT(!"unreachable");
    return false;
}
+
+} // extern "C"
+
+#endif // QMLJS_RUNTIME_H
--- /dev/null
+#include "qv4codegen_p.h"
+#include "qv4isel_p.h"
+
+#include <QtCore/QStringList>
+#include <QtCore/QSet>
+#include <QtCore/QBuffer>
+#include <private/qqmljsast_p.h>
+#include <typeinfo>
+#include <iostream>
+
+using namespace QQmlJS;
+using namespace AST;
+
+namespace {
+QTextStream qout(stdout, QIODevice::WriteOnly);
+
// A sparse set of unsigned ints in [0, size) with O(1) insert, remove and
// membership test (the classic Briggs/Torczon scheme): 'info' densely packs
// the members, 'check[v]' records where v sits in 'info'.  'check' is never
// initialized — correctness relies on the info[check[v]] == v double test.
// Copying is disabled (declared, not defined).
class IntSet
{
    IntSet(const IntSet &other);
    IntSet &operator = (const IntSet &other);

public:
    unsigned *info;
    unsigned *check;
    unsigned count;
    unsigned size;

    IntSet(unsigned size = 32)
        : info(new unsigned[size])
        , check(new unsigned[size])
        , count(0)
        , size(size)
    {
    }

    ~IntSet() {
        delete[] info;
        delete[] check;
    }

    typedef const unsigned *const_iterator;
    typedef const_iterator iterator;
    const_iterator begin() const { return info; }
    const_iterator end() const { return info + count; }

    bool empty() const { return count == 0; }
    // Dropping 'count' removes everything; no per-slot cleanup is needed.
    void clear() { count = 0; }

    // Precondition: value < size (no bounds check is performed).
    inline void insert(unsigned value) {
        if (! contains(value)) {
            info[count] = value;
            check[value] = count;
            ++count;
        }
    }

    // Remove by swapping the last member into the vacated slot.
    inline void remove(unsigned value) {
        const unsigned index = check[value];
        if (index < count && info[index] == value) {
            if (--count) {
                const int v = info[count];
                check[v] = index;
                info[index] = v;
            }
        }
    }

    // check[value] may be stale or uninitialized; the info[] cross-check
    // makes the test safe regardless.
    inline bool contains(unsigned value) const {
        const unsigned index = check[value];
        return index < count && info[index] == value;
    }

    template <typename _It>
    void insert(_It first, _It last) {
        for (; first != last; ++first)
            insert(*first);
    }

    template <typename _It>
    void remove(_It first, _It last) {
        for (; first != last; ++first)
            remove(*first);
    }

    // Set union: add every member of 'other'.
    void insert(const IntSet &other) {
        insert(other.begin(), other.end());
    }

    // True iff this set holds exactly the 'length' values starting at 'it'.
    bool isEqualTo(const unsigned *it, unsigned length) const {
        if (count == length) {
            for (unsigned i = 0; i < length; ++i) {
                if (! contains(it[i]))
                    return false;
            }
            return true;
        }
        return false;
    }

    // Set equality: same size and mutual containment (order-insensitive).
    bool operator == (const IntSet &other) const {
        if (count == other.count) {
            for (unsigned i = 0; i < count; ++i) {
                if (! other.contains(info[i]))
                    return false;
            }
            return true;
        }
        return false;
    }

    bool operator != (const IntSet &other) const {
        return ! operator == (other);
    }
};
+
+void edge(IR::BasicBlock *source, IR::BasicBlock *target)
+{
+ if (! source->out.contains(target))
+ source->out.append(target);
+
+ if (! target->in.contains(source))
+ target->in.append(source);
+}
+
+void dfs(IR::BasicBlock *block,
+ QSet<IR::BasicBlock *> *V,
+ QVector<IR::BasicBlock *> *blocks)
+{
+ if (! V->contains(block)) {
+ V->insert(block);
+
+ foreach (IR::BasicBlock *succ, block->out)
+ dfs(succ, V, blocks);
+
+ blocks->append(block);
+ }
+}
+
// Statement/expression visitor that fills each statement's 'uses' (temps
// read) and 'defs' (temps written) lists, as input to the liveness pass.
struct ComputeUseDef: IR::StmtVisitor, IR::ExprVisitor
{
    IR::Function *_function;
    IR::Stmt *_stmt;   // statement currently being analyzed

    ComputeUseDef(IR::Function *function)
        : _function(function)
        , _stmt(0) {}

    // Analyze one statement; _stmt is saved/restored so calls can nest.
    void operator()(IR::Stmt *s) {
        qSwap(_stmt, s);
        _stmt->accept(this);
        qSwap(_stmt, s);
    }

    virtual void visitConst(IR::Const *) {}
    virtual void visitString(IR::String *) {}
    virtual void visitName(IR::Name *) {}
    virtual void visitClosure(IR::Closure *) {}
    virtual void visitUnop(IR::Unop *e) { e->expr->accept(this); }
    virtual void visitBinop(IR::Binop *e) { e->left->accept(this); e->right->accept(this); }
    virtual void visitSubscript(IR::Subscript *e) { e->base->accept(this); e->index->accept(this); }
    virtual void visitMember(IR::Member *e) { e->base->accept(this); }
    virtual void visitExp(IR::Exp *s) { s->expr->accept(this); }
    virtual void visitEnter(IR::Enter *) {}
    virtual void visitLeave(IR::Leave *) {}
    virtual void visitJump(IR::Jump *) {}
    virtual void visitCJump(IR::CJump *s) { s->cond->accept(this); }
    virtual void visitRet(IR::Ret *s) { s->expr->accept(this); }

    // Any temp reached outside a Move target position is a use.
    virtual void visitTemp(IR::Temp *e) {
        if (! _stmt->uses.contains(e->index))
            _stmt->uses.append(e->index);
    }

    virtual void visitCall(IR::Call *e) {
        e->base->accept(this);
        for (IR::ExprList *it = e->args; it; it = it->next)
            it->expr->accept(this);
    }

    virtual void visitNew(IR::New *e) {
        e->base->accept(this);
        for (IR::ExprList *it = e->args; it; it = it->next)
            it->expr->accept(this);
    }

    // A Move into a temp defines it; any other target is itself a use
    // (e.g. a member or subscript base).  The source is always a use.
    virtual void visitMove(IR::Move *s) {
        if (IR::Temp *t = s->target->asTemp()) {
            if (! _stmt->defs.contains(t->index))
                _stmt->defs.append(t->index);
        } else {
            s->target->accept(this);
        }
        s->source->accept(this);
    }
};
+
// Rewrites temp indices in place to a compact register numbering, recycling
// a register once its temp is no longer live-out of the current statement.
// Register 0 is never handed out (_registerCount starts at 1), so
// QHash::value()'s default-constructed 0 can safely mean "not mapped".
struct RegAlloc: IR::StmtVisitor, IR::ExprVisitor
{
    QHash<unsigned, unsigned> RD;   // register -> virtual register
    QHash<unsigned, unsigned> RA;   // virtual register -> register
    QList<unsigned> _freeRegisters;
    QSet<IR::Temp *> _processed;
    QSet<unsigned> _kill;           // virtual regs whose lifetime ends here
    IR::Stmt *_stmt;
    unsigned _registerCount;

    RegAlloc(): _stmt(0), _registerCount(1) {}

    // Rewrite one statement, then release registers of temps that died.
    void operator()(IR::Stmt *s) {
        _kill.clear();
        qSwap(_stmt, s);
        _stmt->accept(this);
        qSwap(_stmt, s);
        foreach (unsigned r, _kill)
            freeRegister(r);
    }

    unsigned newReg()
    {
        if (! _freeRegisters.isEmpty())
            return _freeRegisters.takeLast();

        return _registerCount++;
    }

    void freeRegister(unsigned virtualRegister)
    {
        unsigned physRegister = RA.value(virtualRegister);
        RA.remove(virtualRegister);
        RD.remove(physRegister);

        if (! _freeRegisters.contains(physRegister))
            _freeRegisters.append(physRegister);
    }

    // Return the register already mapped to t, or allocate a fresh one.
    unsigned getReg(IR::Temp *t)
    {
        const int virtualRegister = t->index;

        // value() yields 0 when unmapped; 0 is never a real register.
        if (unsigned reg = RA.value(virtualRegister)) {
            return reg;
        }

        const unsigned reg = newReg();
        RA[virtualRegister] = reg;
        RD[reg] = virtualRegister;
        return reg;
    }

    virtual void visitConst(IR::Const *) {}
    virtual void visitString(IR::String *) {}
    virtual void visitName(IR::Name *) {}
    virtual void visitClosure(IR::Closure *) {}

    // Rewrite the temp's index; if the old virtual register is not live out
    // of this statement, schedule its register for release.
    virtual void visitTemp(IR::Temp *e) {
        const unsigned virtualRegister = e->index;
        e->index = getReg(e);
        if (! _stmt->liveOut.contains(virtualRegister))
            _kill.insert(virtualRegister);
    }

    virtual void visitUnop(IR::Unop *e) { e->expr->accept(this); }
    virtual void visitBinop(IR::Binop *e) { e->left->accept(this); e->right->accept(this); }
    virtual void visitCall(IR::Call *e) {
        e->base->accept(this);
        for (IR::ExprList *it = e->args; it; it = it->next)
            it->expr->accept(this);
    }

    virtual void visitNew(IR::New *e) {
        e->base->accept(this);
        for (IR::ExprList *it = e->args; it; it = it->next)
            it->expr->accept(this);
    }

    virtual void visitSubscript(IR::Subscript *e) { e->base->accept(this); e->index->accept(this); }
    virtual void visitMember(IR::Member *e) { e->base->accept(this); }

    virtual void visitExp(IR::Exp *s) { s->expr->accept(this); }
    virtual void visitEnter(IR::Enter *) {}
    virtual void visitLeave(IR::Leave *) {}

    // Source first so its register can be reused by the target.
    virtual void visitMove(IR::Move *s) {
        s->source->accept(this);
        s->target->accept(this);
    }

    virtual void visitJump(IR::Jump *) {}
    virtual void visitCJump(IR::CJump *s) { s->cond->accept(this); }
    virtual void visitRet(IR::Ret *s) { s->expr->accept(this); }
};
+
// Classic backwards liveness analysis: builds the CFG, computes per-statement
// use/def sets, then iterates liveIn/liveOut to a fixed point, walking each
// block's statements in reverse.
void liveness(IR::Function *function)
{
    QSet<IR::BasicBlock *> V;
    QVector<IR::BasicBlock *> blocks;

    //
    // compute the CFG
    //
    foreach (IR::BasicBlock *block, function->basicBlocks) {
        if (IR::Stmt *term = block->terminator()) {
            if (IR::Jump *j = term->asJump())
                edge(block, j->target);
            else if (IR::CJump *cj = term->asCJump()) {
                edge(block, cj->iftrue);
                edge(block, cj->iffalse);
            }
        }
    }

    ComputeUseDef computeUseDef(function);
    foreach (IR::BasicBlock *block, function->basicBlocks) {
        foreach (IR::Stmt *s, block->statements)
            computeUseDef(s);
    }

    // Post-order block list; iterating it converges faster for a backwards
    // dataflow problem.
    dfs(function->basicBlocks.first(), &V, &blocks);

    IntSet liveOut(function->tempCount);
    bool changed;

    do {
        changed = false;

        foreach (IR::BasicBlock *block, blocks) {
            liveOut.clear();

            // liveOut(block) = union of liveIn over all successors.
            foreach (IR::BasicBlock *succ, block->out)
                liveOut.insert(succ->liveIn.begin(), succ->liveIn.end());

            if (block->out.isEmpty() || liveOut.empty() || ! liveOut.isEqualTo(block->liveOut.constData(), block->liveOut.size())) {
                block->liveOut.resize(liveOut.count);
                qCopy(liveOut.begin(), liveOut.end(), block->liveOut.begin());

                // Walk statements backwards, applying liveIn = (liveOut \ defs) + uses.
                for (int i = block->statements.size() - 1; i != -1; --i) {
                    IR::Stmt *stmt = block->statements.at(i);
                    stmt->liveOut.resize(liveOut.count);
                    qCopy(liveOut.begin(), liveOut.end(), stmt->liveOut.begin());
                    liveOut.remove(stmt->defs.begin(), stmt->defs.end());
                    liveOut.insert(stmt->uses.begin(), stmt->uses.end());
                    stmt->liveIn.resize(liveOut.count);
                    qCopy(liveOut.begin(), liveOut.end(), stmt->liveIn.begin());
                }

                // At this point liveOut actually holds the block's liveIn.
                if (! changed && ! liveOut.isEqualTo(block->liveIn.constData(), block->liveIn.size()))
                    changed = true;

                block->liveIn.resize(liveOut.count);
                qCopy(liveOut.begin(), liveOut.end(), block->liveIn.begin());
            }
        }
    } while (changed);
}
+
// AST visitor collecting the names declared in a function: var declarations
// plus function expression/declaration names.  Returning false from the
// function visits stops recursion, so nested functions' own locals are not
// collected (only their names are).
struct FindLocals: Visitor
{
    using Visitor::visit;

    QList<QStringRef> locals;

    virtual bool visit(VariableDeclaration *ast)
    {
        if (! locals.contains(ast->name))
            locals.append(ast->name);
        return true;
    }

    virtual bool visit(FunctionExpression *ast)
    {
        if (! locals.contains(ast->name))
            locals.append(ast->name);
        return false;
    }

    virtual bool visit(FunctionDeclaration *ast)
    {
        if (! locals.contains(ast->name))
            locals.append(ast->name);
        return false;
    }
};
+
+} // end of anonymous namespace
+
+Codegen::Codegen()
+ : _function(0)
+ , _block(0)
+ , _exitBlock(0)
+ , _returnAddress(0)
+{
+}
+
// Entry point: lower an AST program into IR ("%entry" holds the global
// code), then linearize and run instruction selection over every function.
void Codegen::operator()(AST::Program *node)
{
    IR::Module module;
    _module = &module;

    IR::Function *globalCode = _module->newFunction(QLatin1String("%entry"));
    _function = globalCode;
    _block = _function->newBasicBlock();
    _exitBlock = _function->newBasicBlock();
    _returnAddress = _block->newTemp();
    // The shared exit block returns whatever was stored in _returnAddress.
    _exitBlock->RET(_exitBlock->TEMP(_returnAddress), IR::UndefinedType);

    program(node);

    // Falling off the end of the program jumps to the exit block.
    if (! _block->isTerminated()) {
        _block->JUMP(_exitBlock);
    }

    InstructionSelection isel(_module);

    foreach (IR::Function *function, _module->functions) {
        linearize(function);
        isel.visitFunction(function);
    }
    // NOTE(review): _module points at the stack-local 'module' and dangles
    // once this function returns — confirm it is never read afterwards.
}
+
+IR::Expr *Codegen::member(IR::Expr *base, const QString *name)
+{
+ if (base->asTemp() || base->asName())
+ return _block->MEMBER(base, name);
+ else {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), base);
+ return _block->MEMBER(_block->TEMP(t), name);
+ }
+}
+
+IR::Expr *Codegen::subscript(IR::Expr *base, IR::Expr *index)
+{
+ if (base->asTemp() || base->asName())
+ return _block->SUBSCRIPT(base, index);
+ else {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), base);
+ return _block->SUBSCRIPT(_block->TEMP(t), index);
+ }
+}
+
+IR::Expr *Codegen::argument(IR::Expr *expr)
+{
+ if (expr && ! expr->asTemp()) {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), expr);
+ expr = _block->TEMP(t);
+ }
+ return expr;
+}
+
+IR::Expr *Codegen::binop(IR::AluOp op, IR::Expr *left, IR::Expr *right)
+{
+ if (left && ! left->asTemp()) {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), left);
+ left = _block->TEMP(t);
+ }
+
+ if (right && ! right->asTemp()) {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), right);
+ right = _block->TEMP(t);
+ }
+
+ return _block->BINOP(op, left, right);
+}
+
+void Codegen::accept(Node *node)
+{
+ if (node)
+ node->accept(this);
+}
+
// Statements are generated purely through the visitor; null is tolerated.
void Codegen::statement(Statement *ast)
{
    accept(ast);
}
+
// Evaluate an expression for its side effects only ('nx' requests no
// value).  If the visitor still produced a bare expression, wrap it in an
// EXP statement so it is emitted.  _expr is saved/restored to allow nesting.
void Codegen::statement(ExpressionNode *ast)
{
    if (ast) {
        Result r(nx);
        qSwap(_expr, r);
        accept(ast);
        qSwap(_expr, r);
        if (r.format == ex) {
            _block->EXP(*r);
        }
    }
}
+
// Evaluate an expression as a branch condition with the given true/false
// targets.  If the visitor left a bare expression instead of branching
// itself, emit the conditional jump here.
void Codegen::condition(ExpressionNode *ast, IR::BasicBlock *iftrue, IR::BasicBlock *iffalse)
{
    if (ast) {
        Result r(iftrue, iffalse);
        qSwap(_expr, r);
        accept(ast);
        qSwap(_expr, r);
        if (r.format == ex) {
            _block->CJUMP(*r, r.iftrue, r.iffalse);
        }
    }
}
+
+Codegen::Result Codegen::expression(ExpressionNode *ast)
+{
+ Result r;
+ if (ast) {
+ qSwap(_expr, r);
+ accept(ast);
+ qSwap(_expr, r);
+ }
+ return r;
+}
+
+QString Codegen::propertyName(PropertyName *ast)
+{
+ QString p;
+ if (ast) {
+ qSwap(_property, p);
+ accept(ast);
+ qSwap(_property, p);
+ }
+ return p;
+}
+
+Codegen::Result Codegen::sourceElement(SourceElement *ast)
+{
+ Result r(nx);
+ if (ast) {
+ qSwap(_expr, r);
+ accept(ast);
+ qSwap(_expr, r);
+ }
+ return r;
+}
+
+Codegen::UiMember Codegen::uiObjectMember(UiObjectMember *ast)
+{
+ UiMember m;
+ if (ast) {
+ qSwap(_uiMember, m);
+ accept(ast);
+ qSwap(_uiMember, m);
+ }
+ return m;
+}
+
+// The helpers below correspond to list/aggregate node kinds that are
+// always traversed inline by their parent constructs; reaching one of
+// them indicates a logic error. Each asserts in debug builds and is a
+// no-op in release builds.
+void Codegen::argumentList(ArgumentList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::caseBlock(CaseBlock *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::caseClause(CaseClause *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::caseClauses(CaseClauses *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::catchNode(Catch *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::defaultClause(DefaultClause *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::elementList(ElementList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::elision(Elision *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::finallyNode(Finally *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::formalParameterList(FormalParameterList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+// A function body is just its list of source elements.
+void Codegen::functionBody(FunctionBody *ast)
+{
+ if (ast)
+ sourceElements(ast->elements);
+}
+
+// A program (top-level script) is likewise its source-element list.
+void Codegen::program(Program *ast)
+{
+ if (ast)
+ sourceElements(ast->elements);
+}
+
+void Codegen::propertyNameAndValueList(PropertyNameAndValueList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+// Generate code for each element of a source-elements list, in order.
+void Codegen::sourceElements(SourceElements *ast)
+{
+ SourceElements *it = ast;
+ while (it) {
+ sourceElement(it->element);
+ it = it->next;
+ }
+}
+
+// More list-node helpers that are never dispatched to directly; the
+// owning constructs walk these lists themselves.
+void Codegen::statementList(StatementList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiArrayMemberList(UiArrayMemberList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiImport(UiImport *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiImportList(UiImportList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiObjectInitializer(UiObjectInitializer *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiObjectMemberList(UiObjectMemberList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiParameterList(UiParameterList *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiProgram(UiProgram *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+void Codegen::uiQualifiedId(UiQualifiedId *)
+{
+ Q_ASSERT(!"not implemented");
+}
+
+// Emit the initializer of a single `var` declaration as an assignment
+// to the variable's name. A declaration without an initializer emits
+// nothing here (the local itself is collected by FindLocals).
+void Codegen::variableDeclaration(VariableDeclaration *ast)
+{
+ if (! ast->expression)
+ return;
+ Result initializer = expression(ast->expression);
+ if (! initializer.code)
+ return;
+ _block->MOVE(_block->NAME(ast->name.toString(),
+ ast->identifierToken.startLine,
+ ast->identifierToken.startColumn),
+ *initializer);
+}
+
+// Emit code for every declaration in a `var` declaration list.
+void Codegen::variableDeclarationList(VariableDeclarationList *ast)
+{
+ for (; ast; ast = ast->next)
+ variableDeclaration(ast->declaration);
+}
+
+
+// The visit() overloads below cover list/aggregate node types that are
+// always traversed explicitly by their parent visit functions, never
+// through the generic accept() dispatch. Reaching any of them is a
+// logic error: each asserts in debug builds and prunes the traversal
+// by returning false.
+bool Codegen::visit(ArgumentList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(CaseBlock *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(CaseClause *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(CaseClauses *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(Catch *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(DefaultClause *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(ElementList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(Elision *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(Finally *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(FormalParameterList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(FunctionBody *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(Program *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(PropertyNameAndValueList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(SourceElements *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(StatementList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiArrayMemberList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiImport *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiImportList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiObjectInitializer *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiObjectMemberList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiParameterList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiProgram *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(UiQualifiedId *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(VariableDeclaration *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+bool Codegen::visit(VariableDeclarationList *)
+{
+ Q_ASSERT(!"unreachable");
+ return false;
+}
+
+// Comma expression: the left operand is evaluated only for its side
+// effects (statement context); the right operand is evaluated in the
+// caller's current context and supplies the overall result.
+bool Codegen::visit(Expression *ast)
+{
+ statement(ast->left);
+ accept(ast->right);
+ return false;
+}
+
+// Array literal: allocate a new Array object into a temp, then store
+// each element through a subscript at its running index. Elisions
+// (holes like [,,1]) only advance the index; no store is emitted for
+// them.
+bool Codegen::visit(ArrayLiteral *ast)
+{
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), _block->NEW(_block->NAME(QLatin1String("Array"), ast->firstSourceLocation().startLine, ast->firstSourceLocation().startColumn)));
+ int index = 0;
+ for (ElementList *it = ast->elements; it; it = it->next) {
+ for (Elision *elision = it->elision; elision; elision = elision->next)
+ ++index;
+ Result expr = expression(it->expression);
+ _block->MOVE(subscript(_block->TEMP(t), _block->CONST(IR::NumberType, index)), *expr);
+ ++index;
+ }
+ // Trailing elisions contribute to the array length but currently
+ // emit no code.
+ for (Elision *elision = ast->elision; elision; elision = elision->next)
+ ++index; // ### set the size of the array
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+// base[expr]: evaluate the base, then the index, and build a
+// subscript access as the expression result.
+bool Codegen::visit(ArrayMemberExpression *ast)
+{
+ const Result object = expression(ast->base);
+ const Result key = expression(ast->expression);
+ _expr.code = subscript(*object, *key);
+ return false;
+}
+
+// Map an in-place assignment operator (+=, -=, <<=, ...) to the
+// underlying arithmetic / bitwise ALU operation; any other operator
+// yields OpInvalid.
+static IR::AluOp baseOp(int op)
+{
+ switch ((QSOperator::Op) op) {
+ case QSOperator::InplaceAdd: return IR::OpAdd;
+ case QSOperator::InplaceSub: return IR::OpSub;
+ case QSOperator::InplaceMul: return IR::OpMul;
+ case QSOperator::InplaceDiv: return IR::OpDiv;
+ case QSOperator::InplaceMod: return IR::OpMod;
+ case QSOperator::InplaceAnd: return IR::OpAnd;
+ case QSOperator::InplaceOr: return IR::OpBitOr;
+ case QSOperator::InplaceXor: return IR::OpBitXor;
+ case QSOperator::InplaceLeftShift: return IR::OpLShift;
+ case QSOperator::InplaceRightShift: return IR::OpRShift;
+ case QSOperator::InplaceURightShift: return IR::OpURShift;
+ default: return IR::OpInvalid;
+ }
+}
+
+// Lower a binary expression. && and || are handled first with explicit
+// short-circuit control flow (the right operand must not be evaluated
+// eagerly); all remaining operators evaluate both operands and then
+// dispatch on the operator kind.
+bool Codegen::visit(BinaryExpression *ast)
+{
+ if (ast->op == QSOperator::And) {
+ if (_expr.accept(cx)) {
+ // In condition context: left falls through to the right
+ // operand's test only when true.
+ IR::BasicBlock *iftrue = _function->newBasicBlock();
+ condition(ast->left, iftrue, _expr.iffalse);
+ _block = iftrue;
+ condition(ast->right, _expr.iftrue, _expr.iffalse);
+ } else {
+ // Value context: materialize the result in a temp. Note the
+ // false branch yields boolean false, not the left operand's
+ // value (a simplification of ECMAScript && semantics).
+ IR::BasicBlock *iftrue = _function->newBasicBlock();
+ IR::BasicBlock *iffalse = _function->newBasicBlock();
+ IR::BasicBlock *endif = _function->newBasicBlock();
+
+ const unsigned r = _block->newTemp();
+
+ condition(ast->left, iftrue, iffalse);
+ _block = iffalse;
+
+ _block->MOVE(_block->TEMP(r), _block->CONST(IR::BoolType, 0));
+ _block->JUMP(endif);
+
+ _block = iftrue;
+ _block->MOVE(_block->TEMP(r), *expression(ast->right));
+ if (! _block->isTerminated())
+ _block->JUMP(endif);
+
+ _expr.code = _block->TEMP(r);
+ _block = endif;
+ }
+ return false;
+ } else if (ast->op == QSOperator::Or) {
+ if (_expr.accept(cx)) {
+ // In condition context: only a false left operand reaches the
+ // right operand's test.
+ IR::BasicBlock *iffalse = _function->newBasicBlock();
+ condition(ast->left, _expr.iftrue, iffalse);
+ _block = iffalse;
+ condition(ast->right, _expr.iftrue, _expr.iffalse);
+ } else {
+ // Value context: keep the left value if truthy, otherwise
+ // evaluate and keep the right value.
+ IR::BasicBlock *iffalse = _function->newBasicBlock();
+ IR::BasicBlock *endif = _function->newBasicBlock();
+
+ const unsigned r = _block->newTemp();
+ _block->MOVE(_block->TEMP(r), *expression(ast->left));
+ _block->CJUMP(_block->TEMP(r), endif, iffalse);
+ _block = iffalse;
+ _block->MOVE(_block->TEMP(r), *expression(ast->right));
+ if (! _block->isTerminated())
+ _block->JUMP(endif);
+
+ _block = endif;
+ _expr.code = _block->TEMP(r);
+ }
+ return false;
+ }
+
+ Result left = expression(ast->left);
+ Result right = expression(ast->right);
+
+ switch (ast->op) {
+ case QSOperator::Or:
+ case QSOperator::And:
+ // Already handled above; listed to keep the switch exhaustive.
+ break;
+
+ case QSOperator::Assign:
+ if (_expr.accept(nx)) {
+ _block->MOVE(*left, *right);
+ } else {
+ // The assigned value is also the expression's result, so
+ // stage it through a temp.
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), *right);
+ _block->MOVE(*left, _block->TEMP(t));
+ _expr.code = _block->TEMP(t);
+ }
+ break;
+
+ case QSOperator::InplaceAnd:
+ case QSOperator::InplaceSub:
+ case QSOperator::InplaceDiv:
+ case QSOperator::InplaceAdd:
+ case QSOperator::InplaceLeftShift:
+ case QSOperator::InplaceMod:
+ case QSOperator::InplaceMul:
+ case QSOperator::InplaceOr:
+ case QSOperator::InplaceRightShift:
+ case QSOperator::InplaceURightShift:
+ case QSOperator::InplaceXor: {
+ // In-place ops are encoded as a MOVE carrying the base ALU op.
+ _block->MOVE(*left, *right, baseOp(ast->op));
+ if (_expr.accept(nx)) {
+ // nothing to do
+ } else {
+ _expr.code = *left;
+ }
+ break;
+ }
+
+ case QSOperator::In:
+ case QSOperator::InstanceOf:
+ case QSOperator::Equal:
+ case QSOperator::NotEqual:
+ case QSOperator::Ge:
+ case QSOperator::Gt:
+ case QSOperator::Le:
+ case QSOperator::Lt:
+ case QSOperator::StrictEqual:
+ case QSOperator::StrictNotEqual:
+ // NOTE(review): the `false &&` disables the direct-CJUMP fast
+ // path, so comparisons in condition context always materialize
+ // into a temp. Presumably intentional while that path is
+ // unfinished — confirm before removing.
+ if (false && _expr.accept(cx)) {
+ _block->CJUMP(binop(IR::binaryOperator(ast->op), *left, *right), _expr.iftrue, _expr.iffalse);
+ } else {
+ IR::Expr *e = binop(IR::binaryOperator(ast->op), *left, *right);
+ if (e->asConst() || e->asString())
+ _expr.code = e; // already folded to a constant
+ else {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), e);
+ _expr.code = _block->TEMP(t);
+ }
+ }
+ break;
+
+ case QSOperator::Add:
+ case QSOperator::BitAnd:
+ case QSOperator::BitOr:
+ case QSOperator::BitXor:
+ case QSOperator::Div:
+ case QSOperator::LShift:
+ case QSOperator::Mod:
+ case QSOperator::Mul:
+ case QSOperator::RShift:
+ case QSOperator::Sub:
+ case QSOperator::URShift: {
+ // Plain arithmetic / bitwise operators: fold-or-materialize,
+ // same pattern as the comparison case above.
+ IR::Expr *e = binop(IR::binaryOperator(ast->op), *left, *right);
+ if (e->asConst() || e->asString())
+ _expr.code = e;
+ else {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), e);
+ _expr.code = _block->TEMP(t);
+ }
+ break;
+ }
+
+ } // switch
+
+ return false;
+}
+
+// Function call: evaluate the callee, then each argument left to
+// right, flattening every argument to a temp via argument(). The list
+// is built with a tail pointer (args_it) so it stays in source order.
+bool Codegen::visit(CallExpression *ast)
+{
+ Result base = expression(ast->base);
+ IR::ExprList *args = 0, **args_it = &args;
+ for (ArgumentList *it = ast->arguments; it; it = it->next) {
+ Result arg = expression(it->expression);
+ IR::Expr *actual = argument(*arg);
+ *args_it = _function->New<IR::ExprList>();
+ (*args_it)->init(actual);
+ args_it = &(*args_it)->next;
+ }
+ _expr.code = _block->CALL(*base, args);
+ return false;
+}
+
+// Ternary (?:): branch on the condition, write whichever arm was taken
+// into a shared temp, and converge on an end block.
+// NOTE(review): unlike the &&/|| lowering, the JUMPs here are emitted
+// without an isTerminated() check; appears to assume the arms cannot
+// end in a terminator — confirm.
+bool Codegen::visit(ConditionalExpression *ast)
+{
+ IR::BasicBlock *iftrue = _function->newBasicBlock();
+ IR::BasicBlock *iffalse = _function->newBasicBlock();
+ IR::BasicBlock *endif = _function->newBasicBlock();
+
+ const unsigned t = _block->newTemp();
+
+ condition(ast->expression, iftrue, iffalse);
+
+ _block = iftrue;
+ _block->MOVE(_block->TEMP(t), *expression(ast->ok));
+ _block->JUMP(endif);
+
+ _block = iffalse;
+ _block->MOVE(_block->TEMP(t), *expression(ast->ko));
+ _block->JUMP(endif);
+
+ _block = endif;
+
+ _expr.code = _block->TEMP(t);
+
+ return false;
+}
+
+bool Codegen::visit(DeleteExpression *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+// `false` literal lowers to a boolean constant 0.
+bool Codegen::visit(FalseLiteral *)
+{
+ _expr.code = _block->CONST(IR::BoolType, 0);
+ return false;
+}
+
+// base.name: evaluate the base and build a member access with the
+// property name interned in the function's string pool.
+bool Codegen::visit(FieldMemberExpression *ast)
+{
+ Result base = expression(ast->base);
+ _expr.code = member(*base, _function->newString(ast->name.toString()));
+ return false;
+}
+
+// Function expression: compile the nested function; defineFunction
+// also sets _expr to a closure over it when a value is requested.
+bool Codegen::visit(FunctionExpression *ast)
+{
+ defineFunction(ast, false);
+ return false;
+}
+
+// Bare identifier reference, carrying its source position for runtime
+// error reporting.
+bool Codegen::visit(IdentifierExpression *ast)
+{
+ _expr.code = _block->NAME(ast->name.toString(),
+ ast->identifierToken.startLine,
+ ast->identifierToken.startColumn);
+ return false;
+}
+
+// Parenthesized expression: transparent, just visit the inner node in
+// the current context.
+bool Codegen::visit(NestedExpression *ast)
+{
+ accept(ast->expression);
+ return false;
+}
+
+// `new expr` with no argument list: construct with an empty args list.
+bool Codegen::visit(NewExpression *ast)
+{
+ Result base = expression(ast->expression);
+ _expr.code = _block->NEW(*base, 0);
+ return false;
+}
+
+// `new expr(args...)`: same tail-pointer argument-list construction as
+// CallExpression, feeding a NEW node instead of a CALL.
+bool Codegen::visit(NewMemberExpression *ast)
+{
+ Result base = expression(ast->base);
+ IR::ExprList *args = 0, **args_it = &args;
+ for (ArgumentList *it = ast->arguments; it; it = it->next) {
+ Result arg = expression(it->expression);
+ IR::Expr *actual = argument(*arg);
+ *args_it = _function->New<IR::ExprList>();
+ (*args_it)->init(actual);
+ args_it = &(*args_it)->next;
+ }
+ _expr.code = _block->NEW(*base, args);
+ return false;
+}
+
+// Logical not: in condition context emit the branch directly on the
+// negated value; otherwise materialize !expr into a temp.
+bool Codegen::visit(NotExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ if (_expr.accept(cx)) {
+ _block->CJUMP(_block->UNOP(IR::OpNot, *expr), _expr.iftrue, _expr.iffalse);
+ } else {
+ const unsigned r = _block->newTemp();
+ _block->MOVE(_block->TEMP(r), _block->UNOP(IR::OpNot, *expr));
+ _expr.code = _block->TEMP(r);
+ }
+ return false;
+}
+
+// `null` literal.
+bool Codegen::visit(NullExpression *)
+{
+ _expr.code = _block->CONST(IR::NullType, 0);
+ return false;
+}
+
+// Numeric literal, carried through as a number constant.
+bool Codegen::visit(NumericLiteral *ast)
+{
+ _expr.code = _block->CONST(IR::NumberType, ast->value);
+ return false;
+}
+
+// Object literal: allocate a new Object into a temp, then assign each
+// property through a member access.
+bool Codegen::visit(ObjectLiteral *ast)
+{
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), _block->NEW(_block->NAME(QLatin1String("Object"), ast->firstSourceLocation().startLine, ast->firstSourceLocation().startColumn)));
+ for (PropertyNameAndValueList *it = ast->properties; it; it = it->next) {
+ QString name = propertyName(it->name);
+ Result value = expression(it->value);
+ _block->MOVE(member(_block->TEMP(t), _function->newString(name)), *value);
+ }
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+// expr--: in statement context just emit the in-place subtract. When a
+// value is needed, save the old value into a temp first — the postfix
+// result is the value before the update.
+bool Codegen::visit(PostDecrementExpression *ast)
+{
+ Result expr = expression(ast->base);
+ if (_expr.accept(nx)) {
+ _block->MOVE(*expr, _block->CONST(IR::NumberType, 1), IR::OpSub);
+ } else {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), *expr);
+ _block->MOVE(*expr, _block->CONST(IR::NumberType, 1), IR::OpSub);
+ _expr.code = _block->TEMP(t);
+ }
+ return false;
+}
+
+// expr++: same shape as PostDecrementExpression with OpAdd.
+bool Codegen::visit(PostIncrementExpression *ast)
+{
+ Result expr = expression(ast->base);
+ if (_expr.accept(nx)) {
+ _block->MOVE(*expr, _block->CONST(IR::NumberType, 1), IR::OpAdd);
+ } else {
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), *expr);
+ _block->MOVE(*expr, _block->CONST(IR::NumberType, 1), IR::OpAdd);
+ _expr.code = _block->TEMP(t);
+ }
+ return false;
+}
+
+// --expr: update first; the prefix result is the updated lvalue.
+bool Codegen::visit(PreDecrementExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ _block->MOVE(*expr, _block->CONST(IR::NumberType, 1), IR::OpSub);
+ if (_expr.accept(nx)) {
+ // nothing to do
+ } else {
+ _expr.code = *expr;
+ }
+ return false;
+}
+
+// ++expr: same shape as PreDecrementExpression with OpAdd.
+bool Codegen::visit(PreIncrementExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ _block->MOVE(*expr, _block->CONST(IR::NumberType, 1), IR::OpAdd);
+
+ if (_expr.accept(nx)) {
+ // nothing to do
+ } else {
+ _expr.code = *expr;
+ }
+ return false;
+}
+
+bool Codegen::visit(RegExpLiteral *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+// String literal, interned in the function's string pool.
+bool Codegen::visit(StringLiteral *ast)
+{
+ _expr.code = _block->STRING(_function->newString(ast->value.toString()));
+ return false;
+}
+
+// `this` is modeled as a name lookup resolved at runtime.
+bool Codegen::visit(ThisExpression *ast)
+{
+ _expr.code = _block->NAME(QLatin1String("this"), ast->thisToken.startLine, ast->thisToken.startColumn);
+ return false;
+}
+
+// Bitwise complement (~expr), materialized into a temp.
+bool Codegen::visit(TildeExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), _block->UNOP(IR::OpCompl, *expr));
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+// `true` literal lowers to a boolean constant 1.
+bool Codegen::visit(TrueLiteral *)
+{
+ _expr.code = _block->CONST(IR::BoolType, 1);
+ return false;
+}
+
+// typeof expr is lowered as a call to a builtin named "typeof" with
+// the operand as its single argument.
+bool Codegen::visit(TypeOfExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ IR::ExprList *args = _function->New<IR::ExprList>();
+ args->init(argument(*expr));
+ _expr.code = _block->CALL(_block->NAME(QLatin1String("typeof"), ast->typeofToken.startLine, ast->typeofToken.startColumn), args);
+ return false;
+}
+
+// Unary minus, materialized into a temp.
+bool Codegen::visit(UnaryMinusExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), _block->UNOP(IR::OpUMinus, *expr));
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+// Unary plus, materialized into a temp.
+bool Codegen::visit(UnaryPlusExpression *ast)
+{
+ Result expr = expression(ast->expression);
+ const unsigned t = _block->newTemp();
+ _block->MOVE(_block->TEMP(t), _block->UNOP(IR::OpUPlus, *expr));
+ _expr.code = _block->TEMP(t);
+ return false;
+}
+
+// void expr: evaluate for side effects only. NOTE(review): does not
+// yield the `undefined` value ECMAScript specifies — see the existing
+// ### marker.
+bool Codegen::visit(VoidExpression *ast)
+{
+ statement(ast->expression); // ### CHECK
+ return false;
+}
+
+// Function declaration: compiled like a function expression, but as a
+// declaration (isDeclaration defaults accordingly in defineFunction).
+bool Codegen::visit(FunctionDeclaration *ast)
+{
+ defineFunction(ast);
+ return false;
+}
+
+// Flatten the function's CFG into a linear block trace (exit block
+// last), run the register allocator over every statement, and dump the
+// resulting MIR when the SHOW_CODE environment variable is set.
+// Fix: the padding loop in the dump used `i` again, shadowing the
+// statement index of the enclosing loop; renamed to `pad`.
+void Codegen::linearize(IR::Function *function)
+{
+ IR::BasicBlock *entryBlock = function->basicBlocks.at(0);
+ IR::BasicBlock *exitBlock = function->basicBlocks.at(1);
+
+ Q_UNUSED(entryBlock);
+
+ // Blocks already placed in the trace; seeding with the exit block
+ // keeps it out of the traversal so it can be appended last.
+ QSet<IR::BasicBlock *> V;
+ V.insert(exitBlock);
+
+ QVector<IR::BasicBlock *> trace;
+
+ // Give empty blocks an explicit fall-through jump to their textual
+ // successor so every block the traversal reaches has a terminator.
+ for (int i = 0; i < function->basicBlocks.size(); ++i) {
+ IR::BasicBlock *block = function->basicBlocks.at(i);
+ if (block->statements.isEmpty()) {
+ if ((i + 1) < function->basicBlocks.size()) {
+ IR::BasicBlock *next = function->basicBlocks.at(i + 1);
+ block->JUMP(next);
+ }
+ }
+ }
+
+ // Greedy trace formation: from each unvisited block, follow jump
+ // targets (preferring the not-yet-placed CJUMP arm) to lay out
+ // chains of blocks contiguously.
+ foreach (IR::BasicBlock *block, function->basicBlocks) {
+ while (block) {
+ if (V.contains(block))
+ break;
+
+ V.insert(block);
+ block->index = trace.size();
+ trace.append(block);
+
+ if (IR::Stmt *term = block->terminator()) {
+ block = 0;
+
+ if (IR::Jump *j = term->asJump()) {
+ block = j->target;
+ } else if (IR::CJump *cj = term->asCJump()) {
+ if (! V.contains(cj->iffalse))
+ block = cj->iffalse;
+ else
+ block = cj->iftrue;
+ }
+ }
+ }
+ }
+ exitBlock->index = trace.size();
+ trace.append(exitBlock);
+ function->basicBlocks = trace;
+
+ liveness(function);
+
+ // Map virtual temps onto registers, statement by statement.
+ {
+ RegAlloc regalloc;
+ foreach (IR::BasicBlock *block, function->basicBlocks) {
+ foreach (IR::Stmt *s, block->statements)
+ regalloc(s);
+ }
+ function->tempCount = regalloc._registerCount - 1;
+ }
+
+ // Optional debug dump of the linearized MIR (enabled by presence of
+ // the SHOW_CODE environment variable; evaluated once).
+ static bool showCode = !qgetenv("SHOW_CODE").isNull();
+ if (showCode) {
+ QVector<IR::Stmt *> code;
+ QHash<IR::Stmt *, IR::BasicBlock *> leader;
+
+ foreach (IR::BasicBlock *block, function->basicBlocks) {
+ leader.insert(block->statements.first(), block);
+ foreach (IR::Stmt *s, block->statements) {
+ code.append(s);
+ }
+ }
+
+ QString name;
+ if (function->name && !function->name->isEmpty())
+ name = *function->name;
+ else
+ name.sprintf("%p", function);
+
+ qout << "function " << name << "(";
+ for (int i = 0; i < function->formals.size(); ++i) {
+ if (i != 0)
+ qout << ", ";
+ qout << *function->formals.at(i);
+ }
+ qout << ")" << endl
+ << "{" << endl;
+
+ foreach (const QString *local, function->locals) {
+ qout << " var " << *local << ';' << endl;
+ }
+
+ for (int i = 0; i < code.size(); ++i) {
+ IR::Stmt *s = code.at(i);
+
+ // Print a label line when this statement starts a block.
+ if (IR::BasicBlock *bb = leader.value(s)) {
+ qout << endl;
+ qout << 'L' << bb << ':' << endl;
+ }
+ // Suppress jumps that just fall through to the next block.
+ IR::Stmt *n = (i + 1) < code.size() ? code.at(i + 1) : 0;
+ if (n && s->asJump() && s->asJump()->target == leader.value(n)) {
+ continue;
+ }
+
+ QByteArray str;
+ QBuffer buf(&str);
+ buf.open(QIODevice::WriteOnly);
+ QTextStream out(&buf);
+ s->dump(out, IR::Stmt::MIR);
+ out.flush();
+
+ // Pad each statement to a fixed column (was `i`, which
+ // shadowed the statement index of the enclosing loop).
+ for (int pad = 60 - str.size(); pad >= 0; --pad)
+ str.append(' ');
+
+ qout << " " << str;
+
+ qout << endl;
+
+ // Make the implicit false edge of a conditional jump visible
+ // when it does not fall through.
+ if (n && s->asCJump() && s->asCJump()->iffalse != leader.value(n)) {
+ qout << " goto L" << s->asCJump()->iffalse << ";" << endl;
+ }
+ }
+
+ qout << "}" << endl
+ << endl;
+ }
+}
+
+// Compile a nested function: create its IR::Function with an entry and
+// a shared exit block, swap the codegen state (current function, block,
+// exit block, return temp) to the new function, generate its body, and
+// swap back. When the caller wants a value, the result is a closure
+// over the freshly compiled function.
+void Codegen::defineFunction(FunctionExpression *ast, bool /*isDeclaration*/)
+{
+ IR::Function *function = _module->newFunction(ast->name.toString());
+ IR::BasicBlock *entryBlock = function->newBasicBlock();
+ IR::BasicBlock *exitBlock = function->newBasicBlock();
+ unsigned returnAddress = entryBlock->newTemp();
+
+ // NOTE(review): the TEMP node is created through the outer
+ // function's current _block even though returnAddress belongs to the
+ // new function's entry block; appears to rely on TEMP being a pure
+ // expression factory — confirm.
+ exitBlock->RET(_block->TEMP(returnAddress), IR::InvalidType);
+
+ qSwap(_function, function);
+ qSwap(_block, entryBlock);
+ qSwap(_exitBlock, exitBlock);
+ qSwap(_returnAddress, returnAddress);
+
+ // Declare formals in source order.
+ for (FormalParameterList *it = ast->formals; it; it = it->next) {
+ _function->RECEIVE(it->name.toString());
+ }
+
+ // Hoist `var` declarations found anywhere in the body.
+ FindLocals locals;
+ if (ast->body)
+ ast->body->accept(&locals);
+
+ foreach (const QStringRef &local, locals.locals) {
+ _function->LOCAL(local.toString());
+ }
+
+ functionBody(ast->body);
+
+ // Fall off the end of the body jumps to the shared exit block.
+ if (! _block->isTerminated())
+ _block->JUMP(_exitBlock);
+
+ // Restore the enclosing function's codegen state.
+ qSwap(_function, function);
+ qSwap(_block, entryBlock);
+ qSwap(_exitBlock, exitBlock);
+ qSwap(_returnAddress, returnAddress);
+
+ if (_expr.accept(nx)) {
+ // nothing to do
+ } else {
+ _expr.code = _block->CLOSURE(function);
+ }
+}
+
+// Property-name visitors store their textual form into _property,
+// which propertyName() swaps around the visit.
+bool Codegen::visit(IdentifierPropertyName *ast)
+{
+ _property = ast->id.toString();
+ return false;
+}
+
+// Numeric property key, formatted with up to 16 significant digits.
+bool Codegen::visit(NumericLiteralPropertyName *ast)
+{
+ _property = QString::number(ast->id, 'g', 16);
+ return false;
+}
+
+bool Codegen::visit(StringLiteralPropertyName *ast)
+{
+ _property = ast->id.toString();
+ return false;
+}
+
+// Source elements delegate to the statement machinery.
+bool Codegen::visit(FunctionSourceElement *ast)
+{
+ statement(ast->declaration);
+ return false;
+}
+
+bool Codegen::visit(StatementSourceElement *ast)
+{
+ statement(ast->statement);
+ return false;
+}
+
+// Block statement: generate each contained statement in order.
+bool Codegen::visit(Block *ast)
+{
+ for (StatementList *it = ast->statements; it; it = it->next) {
+ statement(it->statement);
+ }
+ return false;
+}
+
+// break jumps to the innermost loop's break block.
+// NOTE(review): no guard for break/continue outside a loop — _loop's
+// blocks are null then; presumably the parser rejects that case, but
+// verify.
+bool Codegen::visit(BreakStatement *)
+{
+ _block->JUMP(_loop.breakBlock);
+ return false;
+}
+
+// continue jumps to the innermost loop's continue block.
+bool Codegen::visit(ContinueStatement *)
+{
+ _block->JUMP(_loop.continueBlock);
+ return false;
+}
+
+bool Codegen::visit(DebuggerStatement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+// do { body } while (cond): body runs first, then the condition loops
+// back to the body or exits.
+// Fix: the entry jump into the loop body was guarded with
+// `if (_block->isTerminated())`, i.e. it appended a JUMP only to a
+// block that already had a terminator and omitted the needed edge from
+// a live block. Inverted to match every sibling construct (e.g.
+// ForStatement, IfStatement), which jump when the block is NOT yet
+// terminated.
+bool Codegen::visit(DoWhileStatement *ast)
+{
+ IR::BasicBlock *loopbody = _function->newBasicBlock();
+ IR::BasicBlock *loopend = _function->newBasicBlock();
+
+ Loop loop(loopend, loopbody);
+ qSwap(_loop, loop);
+
+ if (! _block->isTerminated())
+ _block->JUMP(loopbody);
+ _block = loopbody;
+ statement(ast->statement);
+ condition(ast->expression, loopbody, loopend);
+ _block = loopend;
+
+ qSwap(_loop, loop);
+ return false;
+}
+
+// Empty statement: nothing to emit.
+bool Codegen::visit(EmptyStatement *)
+{
+ return false;
+}
+
+// Expression statement: evaluate for side effects only.
+bool Codegen::visit(ExpressionStatement *ast)
+{
+ statement(ast->expression);
+ return false;
+}
+
+bool Codegen::visit(ForEachStatement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+// for (init; cond; step) body — four blocks: condition test, body,
+// step, and exit. `continue` targets the step block, `break` the exit.
+bool Codegen::visit(ForStatement *ast)
+{
+ IR::BasicBlock *forcond = _function->newBasicBlock();
+ IR::BasicBlock *forbody = _function->newBasicBlock();
+ IR::BasicBlock *forstep = _function->newBasicBlock();
+ IR::BasicBlock *forend = _function->newBasicBlock();
+
+ Loop loop(forend, forstep);
+ qSwap(_loop, loop);
+
+ statement(ast->initialiser);
+ if (! _block->isTerminated())
+ _block->JUMP(forcond);
+
+ _block = forcond;
+ condition(ast->condition, forbody, forend);
+
+ _block = forbody;
+ statement(ast->statement);
+ if (! _block->isTerminated())
+ _block->JUMP(forstep);
+
+ _block = forstep;
+ statement(ast->expression);
+ if (! _block->isTerminated())
+ _block->JUMP(forcond);
+ _block = forend;
+
+ // Restore the enclosing loop context.
+ qSwap(_loop, loop);
+
+ return false;
+}
+
+// if/else: without an else clause the false edge of the condition goes
+// straight to the end block, so no iffalse block is allocated.
+bool Codegen::visit(IfStatement *ast)
+{
+ IR::BasicBlock *iftrue = _function->newBasicBlock();
+ IR::BasicBlock *iffalse = ast->ko ? _function->newBasicBlock() : 0;
+ IR::BasicBlock *endif = _function->newBasicBlock();
+ condition(ast->expression, iftrue, ast->ko ? iffalse : endif);
+
+ _block = iftrue;
+ statement(ast->ok);
+ if (! _block->isTerminated())
+ _block->JUMP(endif);
+
+ if (ast->ko) {
+ _block = iffalse;
+ statement(ast->ko);
+ if (! _block->isTerminated())
+ _block->JUMP(endif);
+ }
+
+ _block = endif;
+
+ return false;
+}
+
+bool Codegen::visit(LabelledStatement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(LocalForEachStatement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+// for (var decls; cond; step) body — identical CFG shape to
+// ForStatement, but the initialiser is a `var` declaration list.
+bool Codegen::visit(LocalForStatement *ast)
+{
+ IR::BasicBlock *forcond = _function->newBasicBlock();
+ IR::BasicBlock *forbody = _function->newBasicBlock();
+ IR::BasicBlock *forstep = _function->newBasicBlock();
+ IR::BasicBlock *forend = _function->newBasicBlock();
+
+ Loop loop(forend, forstep);
+ qSwap(_loop, loop);
+
+ variableDeclarationList(ast->declarations);
+ if (! _block->isTerminated())
+ _block->JUMP(forcond);
+
+ _block = forcond;
+ condition(ast->condition, forbody, forend);
+
+ _block = forbody;
+ statement(ast->statement);
+ if (! _block->isTerminated())
+ _block->JUMP(forstep);
+
+ _block = forstep;
+ statement(ast->expression);
+ if (! _block->isTerminated())
+ _block->JUMP(forcond);
+ _block = forend;
+
+ qSwap(_loop, loop);
+ return false;
+}
+
+// return [expr]: store the return value (if any) into the function's
+// dedicated return temp, then jump to the shared exit block.
+bool Codegen::visit(ReturnStatement *ast)
+{
+ if (ast->expression)
+ _block->MOVE(_block->TEMP(_returnAddress), *expression(ast->expression));
+ _block->JUMP(_exitBlock);
+ return false;
+}
+
+bool Codegen::visit(SwitchStatement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+// throw: placeholder — no unwinding is generated yet; the statement
+// currently just yields an undefined constant as its result.
+bool Codegen::visit(ThrowStatement *)
+{
+ //Q_ASSERT(!"not implemented");
+ _expr.code = _block->CONST(IR::UndefinedType, 0);
+ return false;
+}
+
+bool Codegen::visit(TryStatement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+// var statement: emit each declaration's initializer.
+bool Codegen::visit(VariableStatement *ast)
+{
+ variableDeclarationList(ast->declarations);
+ return false;
+}
+
+// while (cond) body — condition block, body block, exit block.
+// `continue` re-tests the condition, `break` exits.
+// NOTE(review): the initial JUMP is emitted without an isTerminated()
+// check, unlike ForStatement — presumably fine because statements are
+// only generated into live blocks; confirm.
+bool Codegen::visit(WhileStatement *ast)
+{
+ IR::BasicBlock *whilecond = _function->newBasicBlock();
+ IR::BasicBlock *whilebody = _function->newBasicBlock();
+ IR::BasicBlock *whileend = _function->newBasicBlock();
+
+ Loop loop(whileend, whilecond);
+ qSwap(_loop, loop);
+
+ _block->JUMP(whilecond);
+ _block = whilecond;
+ condition(ast->expression, whilebody, whileend);
+
+ _block = whilebody;
+ statement(ast->statement);
+ if (! _block->isTerminated())
+ _block->JUMP(whilecond);
+
+ _block = whileend;
+ qSwap(_loop, loop);
+
+ return false;
+}
+
+// Remaining statement and QML UI node kinds are not supported by this
+// code generator yet; each asserts in debug builds.
+bool Codegen::visit(WithStatement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiArrayBinding *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiObjectBinding *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiObjectDefinition *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiPublicMember *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiScriptBinding *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
+
+bool Codegen::visit(UiSourceElement *)
+{
+ Q_ASSERT(!"not implemented");
+ return false;
+}
--- /dev/null
+#ifndef QV4CODEGEN_P_H
+#define QV4CODEGEN_P_H
+
+#include "qv4ir_p.h"
+#include <private/qqmljsastvisitor_p.h>
+
+namespace QQmlJS {
+
+namespace AST {
+class UiParameterList;
+}
+
+class Codegen: protected AST::Visitor
+{
+public:
+ Codegen();
+
+ void operator()(AST::Program *ast);
+
+protected:
+ enum Format { ex, cx, nx };
+ // Result of evaluating an AST node in one of three contexts:
+ // ex — value wanted (code holds the expression),
+ // cx — branch wanted (iftrue/iffalse are the targets),
+ // nx — side effects only.
+ // `requested` is what the parent asked for; `format` is what the
+ // visitor actually produced (the parent emits glue when they differ).
+ struct Result {
+ IR::Expr *code;
+ IR::BasicBlock *iftrue;
+ IR::BasicBlock *iffalse;
+ Format format;
+ Format requested;
+
+ explicit Result(Format requested = ex)
+ : code(0)
+ , iftrue(0)
+ , iffalse(0)
+ , format(ex)
+ , requested(requested) {}
+
+ explicit Result(IR::BasicBlock *iftrue, IR::BasicBlock *iffalse)
+ : code(0)
+ , iftrue(iftrue)
+ , iffalse(iffalse)
+ , format(ex)
+ , requested(cx) {}
+
+ // Accessing the value asserts the result really is an expression.
+ inline IR::Expr *operator*() const { Q_ASSERT(format == ex); return code; }
+ inline IR::Expr *operator->() const { Q_ASSERT(format == ex); return code; }
+
+ // A visitor calls accept(f) to claim it can produce format f
+ // directly; returns true (and records it) only if f was requested.
+ bool accept(Format f)
+ {
+ if (requested == f) {
+ format = f;
+ return true;
+ }
+ return false;
+ }
+ };
+
+ // Placeholder result type for QML UI object members (not yet used).
+ struct UiMember {
+ };
+
+ // Jump targets for the innermost enclosing loop: `break` goes to
+ // breakBlock, `continue` to continueBlock. Both null outside loops.
+ struct Loop {
+ IR::BasicBlock *breakBlock;
+ IR::BasicBlock *continueBlock;
+
+ Loop()
+ : breakBlock(0), continueBlock(0) {}
+
+ Loop(IR::BasicBlock *breakBlock, IR::BasicBlock *continueBlock)
+ : breakBlock(breakBlock), continueBlock(continueBlock) {}
+ };
+
+ IR::Expr *member(IR::Expr *base, const QString *name);
+ IR::Expr *subscript(IR::Expr *base, IR::Expr *index);
+ IR::Expr *argument(IR::Expr *expr);
+ IR::Expr *binop(IR::AluOp op, IR::Expr *left, IR::Expr *right);
+
+ void linearize(IR::Function *function);
+ void defineFunction(AST::FunctionExpression *ast, bool isDeclaration = false);
+
+ void statement(AST::Statement *ast);
+ void statement(AST::ExpressionNode *ast);
+ void condition(AST::ExpressionNode *ast, IR::BasicBlock *iftrue, IR::BasicBlock *iffalse);
+ Result expression(AST::ExpressionNode *ast);
+ QString propertyName(AST::PropertyName *ast);
+ Result sourceElement(AST::SourceElement *ast);
+ UiMember uiObjectMember(AST::UiObjectMember *ast);
+
+ void accept(AST::Node *node);
+
+ void argumentList(AST::ArgumentList *ast);
+ void caseBlock(AST::CaseBlock *ast);
+ void caseClause(AST::CaseClause *ast);
+ void caseClauses(AST::CaseClauses *ast);
+ void catchNode(AST::Catch *ast);
+ void defaultClause(AST::DefaultClause *ast);
+ void elementList(AST::ElementList *ast);
+ void elision(AST::Elision *ast);
+ void finallyNode(AST::Finally *ast);
+ void formalParameterList(AST::FormalParameterList *ast);
+ void functionBody(AST::FunctionBody *ast);
+ void program(AST::Program *ast);
+ void propertyNameAndValueList(AST::PropertyNameAndValueList *ast);
+ void sourceElements(AST::SourceElements *ast);
+ void statementList(AST::StatementList *ast);
+ void uiArrayMemberList(AST::UiArrayMemberList *ast);
+ void uiImport(AST::UiImport *ast);
+ void uiImportList(AST::UiImportList *ast);
+ void uiObjectInitializer(AST::UiObjectInitializer *ast);
+ void uiObjectMemberList(AST::UiObjectMemberList *ast);
+ void uiParameterList(AST::UiParameterList *ast);
+ void uiProgram(AST::UiProgram *ast);
+ void uiQualifiedId(AST::UiQualifiedId *ast);
+ void variableDeclaration(AST::VariableDeclaration *ast);
+ void variableDeclarationList(AST::VariableDeclarationList *ast);
+
+ // nodes
+ virtual bool visit(AST::ArgumentList *ast);
+ virtual bool visit(AST::CaseBlock *ast);
+ virtual bool visit(AST::CaseClause *ast);
+ virtual bool visit(AST::CaseClauses *ast);
+ virtual bool visit(AST::Catch *ast);
+ virtual bool visit(AST::DefaultClause *ast);
+ virtual bool visit(AST::ElementList *ast);
+ virtual bool visit(AST::Elision *ast);
+ virtual bool visit(AST::Finally *ast);
+ virtual bool visit(AST::FormalParameterList *ast);
+ virtual bool visit(AST::FunctionBody *ast);
+ virtual bool visit(AST::Program *ast);
+ virtual bool visit(AST::PropertyNameAndValueList *ast);
+ virtual bool visit(AST::SourceElements *ast);
+ virtual bool visit(AST::StatementList *ast);
+ virtual bool visit(AST::UiArrayMemberList *ast);
+ virtual bool visit(AST::UiImport *ast);
+ virtual bool visit(AST::UiImportList *ast);
+ virtual bool visit(AST::UiObjectInitializer *ast);
+ virtual bool visit(AST::UiObjectMemberList *ast);
+ virtual bool visit(AST::UiParameterList *ast);
+ virtual bool visit(AST::UiProgram *ast);
+ virtual bool visit(AST::UiQualifiedId *ast);
+ virtual bool visit(AST::VariableDeclaration *ast);
+ virtual bool visit(AST::VariableDeclarationList *ast);
+
+ // expressions
+ virtual bool visit(AST::Expression *ast);
+ virtual bool visit(AST::ArrayLiteral *ast);
+ virtual bool visit(AST::ArrayMemberExpression *ast);
+ virtual bool visit(AST::BinaryExpression *ast);
+ virtual bool visit(AST::CallExpression *ast);
+ virtual bool visit(AST::ConditionalExpression *ast);
+ virtual bool visit(AST::DeleteExpression *ast);
+ virtual bool visit(AST::FalseLiteral *ast);
+ virtual bool visit(AST::FieldMemberExpression *ast);
+ virtual bool visit(AST::FunctionExpression *ast);
+ virtual bool visit(AST::IdentifierExpression *ast);
+ virtual bool visit(AST::NestedExpression *ast);
+ virtual bool visit(AST::NewExpression *ast);
+ virtual bool visit(AST::NewMemberExpression *ast);
+ virtual bool visit(AST::NotExpression *ast);
+ virtual bool visit(AST::NullExpression *ast);
+ virtual bool visit(AST::NumericLiteral *ast);
+ virtual bool visit(AST::ObjectLiteral *ast);
+ virtual bool visit(AST::PostDecrementExpression *ast);
+ virtual bool visit(AST::PostIncrementExpression *ast);
+ virtual bool visit(AST::PreDecrementExpression *ast);
+ virtual bool visit(AST::PreIncrementExpression *ast);
+ virtual bool visit(AST::RegExpLiteral *ast);
+ virtual bool visit(AST::StringLiteral *ast);
+ virtual bool visit(AST::ThisExpression *ast);
+ virtual bool visit(AST::TildeExpression *ast);
+ virtual bool visit(AST::TrueLiteral *ast);
+ virtual bool visit(AST::TypeOfExpression *ast);
+ virtual bool visit(AST::UnaryMinusExpression *ast);
+ virtual bool visit(AST::UnaryPlusExpression *ast);
+ virtual bool visit(AST::VoidExpression *ast);
+ virtual bool visit(AST::FunctionDeclaration *ast);
+
+ // property names
+ virtual bool visit(AST::IdentifierPropertyName *ast);
+ virtual bool visit(AST::NumericLiteralPropertyName *ast);
+ virtual bool visit(AST::StringLiteralPropertyName *ast);
+
+ // source elements
+ virtual bool visit(AST::FunctionSourceElement *ast);
+ virtual bool visit(AST::StatementSourceElement *ast);
+
+ // statements
+ virtual bool visit(AST::Block *ast);
+ virtual bool visit(AST::BreakStatement *ast);
+ virtual bool visit(AST::ContinueStatement *ast);
+ virtual bool visit(AST::DebuggerStatement *ast);
+ virtual bool visit(AST::DoWhileStatement *ast);
+ virtual bool visit(AST::EmptyStatement *ast);
+ virtual bool visit(AST::ExpressionStatement *ast);
+ virtual bool visit(AST::ForEachStatement *ast);
+ virtual bool visit(AST::ForStatement *ast);
+ virtual bool visit(AST::IfStatement *ast);
+ virtual bool visit(AST::LabelledStatement *ast);
+ virtual bool visit(AST::LocalForEachStatement *ast);
+ virtual bool visit(AST::LocalForStatement *ast);
+ virtual bool visit(AST::ReturnStatement *ast);
+ virtual bool visit(AST::SwitchStatement *ast);
+ virtual bool visit(AST::ThrowStatement *ast);
+ virtual bool visit(AST::TryStatement *ast);
+ virtual bool visit(AST::VariableStatement *ast);
+ virtual bool visit(AST::WhileStatement *ast);
+ virtual bool visit(AST::WithStatement *ast);
+
+ // ui object members
+ virtual bool visit(AST::UiArrayBinding *ast);
+ virtual bool visit(AST::UiObjectBinding *ast);
+ virtual bool visit(AST::UiObjectDefinition *ast);
+ virtual bool visit(AST::UiPublicMember *ast);
+ virtual bool visit(AST::UiScriptBinding *ast);
+ virtual bool visit(AST::UiSourceElement *ast);
+
+private:
+ Result _expr;
+ QString _property;
+ UiMember _uiMember;
+ Loop _loop;
+ IR::Module *_module;
+ IR::Function *_function;
+ IR::BasicBlock *_block;
+ IR::BasicBlock *_exitBlock;
+ unsigned _returnAddress;
+};
+
+} // end of namespace QQmlJS
+
+#endif // QV4CODEGEN_P_H
--- /dev/null
+/****************************************************************************
+**
+** Copyright (C) 2012 Nokia Corporation and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/
+**
+** This file is part of the QtQml module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** GNU Lesser General Public License Usage
+** This file may be used under the terms of the GNU Lesser General Public
+** License version 2.1 as published by the Free Software Foundation and
+** appearing in the file LICENSE.LGPL included in the packaging of this
+** file. Please review the following information to ensure the GNU Lesser
+** General Public License version 2.1 requirements will be met:
+** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU General
+** Public License version 3.0 as published by the Free Software Foundation
+** and appearing in the file LICENSE.GPL included in the packaging of this
+** file. Please review the following information to ensure the GNU General
+** Public License version 3.0 requirements will be met:
+** http://www.gnu.org/copyleft/gpl.html.
+**
+** Other Usage
+** Alternatively, this file may be used in accordance with the terms and
+** conditions contained in a signed written agreement between you and Nokia.
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qv4ir_p.h"
+#include <private/qqmljsast_p.h>
+
+#include <QtCore/qtextstream.h>
+#include <QtCore/qdebug.h>
+#include <math.h>
+
+QT_BEGIN_NAMESPACE
+
+namespace QQmlJS {
+namespace IR {
+
+const char *typeName(Type t)
+{
+ switch (t) {
+ case InvalidType: return "invalid";
+ case UndefinedType: return "undefined";
+ case NullType: return "null";
+ case VoidType: return "void";
+ case StringType: return "string";
+ case UrlType: return "QUrl";
+ case ColorType: return "QColor";
+ case SGAnchorLineType: return "SGAnchorLine";
+ case AttachType: return "AttachType";
+ case ObjectType: return "object";
+ case VariantType: return "variant";
+ case VarType: return "var";
+ case BoolType: return "bool";
+ case IntType: return "int";
+ case FloatType: return "float";
+ case NumberType: return "number";
+ default: return "invalid";
+ }
+}
+
+inline bool isNumberType(IR::Type ty)
+{
+ return ty >= IR::FirstNumberType;
+}
+
+inline bool isStringType(IR::Type ty)
+{
+ return ty == IR::StringType || ty == IR::UrlType || ty == IR::ColorType;
+}
+
+IR::Type maxType(IR::Type left, IR::Type right)
+{
+ if (isStringType(left) && isStringType(right)) {
+ // String promotions (url to string) are more specific than
+ // identity conversions (AKA left == right). That's because
+ // we want to ensure we convert urls to strings in binary
+ // expressions.
+ return IR::StringType;
+ } else if (left == right)
+ return left;
+ else if (isNumberType(left) && isNumberType(right)) {
+ IR::Type ty = qMax(left, right);
+ return ty == FloatType ? NumberType : ty; // promote floats
+ } else if ((isNumberType(left) && isStringType(right)) ||
+ (isNumberType(right) && isStringType(left)))
+ return IR::StringType;
+ else
+ return IR::InvalidType;
+}
+
+bool isRealType(IR::Type type)
+{
+ return type == IR::NumberType || type == IR::FloatType;
+}
+
+const char *opname(AluOp op)
+{
+ switch (op) {
+ case OpInvalid: return "?";
+
+ case OpIfTrue: return "(bool)";
+ case OpNot: return "!";
+ case OpUMinus: return "-";
+ case OpUPlus: return "+";
+ case OpCompl: return "~";
+
+ case OpBitAnd: return "&";
+ case OpBitOr: return "|";
+ case OpBitXor: return "^";
+
+ case OpAdd: return "+";
+ case OpSub: return "-";
+ case OpMul: return "*";
+ case OpDiv: return "/";
+ case OpMod: return "%";
+
+ case OpLShift: return "<<";
+ case OpRShift: return ">>";
+ case OpURShift: return ">>>";
+
+ case OpGt: return ">";
+ case OpLt: return "<";
+ case OpGe: return ">=";
+ case OpLe: return "<=";
+ case OpEqual: return "==";
+ case OpNotEqual: return "!=";
+ case OpStrictEqual: return "===";
+ case OpStrictNotEqual: return "!==";
+
+ case OpAnd: return "&&";
+ case OpOr: return "||";
+
+ default: return "?";
+
+ } // switch
+}
+
+AluOp binaryOperator(int op)
+{
+ switch (static_cast<QSOperator::Op>(op)) {
+ case QSOperator::Add: return OpAdd;
+ case QSOperator::And: return OpAnd;
+ case QSOperator::BitAnd: return OpBitAnd;
+ case QSOperator::BitOr: return OpBitOr;
+ case QSOperator::BitXor: return OpBitXor;
+ case QSOperator::Div: return OpDiv;
+ case QSOperator::Equal: return OpEqual;
+ case QSOperator::Ge: return OpGe;
+ case QSOperator::Gt: return OpGt;
+ case QSOperator::Le: return OpLe;
+ case QSOperator::LShift: return OpLShift;
+ case QSOperator::Lt: return OpLt;
+ case QSOperator::Mod: return OpMod;
+ case QSOperator::Mul: return OpMul;
+ case QSOperator::NotEqual: return OpNotEqual;
+ case QSOperator::Or: return OpOr;
+ case QSOperator::RShift: return OpRShift;
+ case QSOperator::StrictEqual: return OpStrictEqual;
+ case QSOperator::StrictNotEqual: return OpStrictNotEqual;
+ case QSOperator::Sub: return OpSub;
+ case QSOperator::URShift: return OpURShift;
+ default: return OpInvalid;
+ }
+}
+
+void Const::dump(QTextStream &out)
+{
+ switch (type) {
+ case QQmlJS::IR::UndefinedType:
+ out << "undefined";
+ break;
+ case QQmlJS::IR::NullType:
+ out << "null";
+ break;
+ case QQmlJS::IR::VoidType:
+ out << "void";
+ break;
+ case QQmlJS::IR::BoolType:
+ out << (value ? "true" : "false");
+ break;
+ default:
+ out << QString::number(value, 'g', 16);
+ break;
+ }
+}
+
+void String::dump(QTextStream &out)
+{
+ out << '"' << escape(*value) << '"';
+}
+
+QString String::escape(const QString &s)
+{
+ QString r;
+ for (int i = 0; i < s.length(); ++i) {
+ const QChar ch = s.at(i);
+ if (ch == QLatin1Char('\n'))
+ r += QLatin1String("\\n");
+ else if (ch == QLatin1Char('\r'))
+ r += QLatin1String("\\r");
+ else if (ch == QLatin1Char('\\'))
+ r += QLatin1String("\\\\");
+ else if (ch == QLatin1Char('"'))
+ r += QLatin1String("\\\"");
+ else if (ch == QLatin1Char('\''))
+ r += QLatin1String("\\'");
+ else
+ r += ch;
+ }
+ return r;
+}
+
+void Name::init(Type type, const QString *id, quint32 line, quint32 column)
+{
+ this->type = type;
+ this->id = id;
+ this->line = line;
+ this->column = column;
+}
+
// Dump just the identifier text.
void Name::dump(QTextStream &out)
{
    out << *id;
}
+
+void Temp::dump(QTextStream &out)
+{
+ out << '%' << index;
+}
+
+void Closure::dump(QTextStream &out)
+{
+ QString name = value->name ? *value->name : QString();
+ if (name.isEmpty())
+ name.sprintf("%p", value);
+ out << "closure(" << name << ')';
+}
+
// Dump as a prefix operator followed by its operand.
void Unop::dump(QTextStream &out)
{
    out << opname(op);
    expr->dump(out);
}
+
+Type Unop::typeForOp(AluOp op, Expr *expr)
+{
+ switch (op) {
+ case OpIfTrue: return BoolType;
+ case OpNot: return BoolType;
+
+ case OpUMinus:
+ case OpUPlus:
+ case OpCompl:
+ return maxType(expr->type, NumberType);
+
+ default:
+ break;
+ }
+
+ return InvalidType;
+}
+
+void Binop::dump(QTextStream &out)
+{
+ left->dump(out);
+ out << ' ' << opname(op) << ' ';
+ right->dump(out);
+}
+
+Type Binop::typeForOp(AluOp op, Expr *left, Expr *right)
+{
+ if (! (left && right))
+ return InvalidType;
+
+ switch (op) {
+ case OpInvalid:
+ return InvalidType;
+
+ // unary operators
+ case OpIfTrue:
+ case OpNot:
+ case OpUMinus:
+ case OpUPlus:
+ case OpCompl:
+ return InvalidType;
+
+ // bit fields
+ case OpBitAnd:
+ case OpBitOr:
+ case OpBitXor:
+ return IntType;
+
+ case OpAdd:
+ if (left->type == StringType)
+ return StringType;
+ return NumberType;
+
+ case OpSub:
+ case OpMul:
+ case OpDiv:
+ case OpMod:
+ return NumberType;
+
+ case OpLShift:
+ case OpRShift:
+ case OpURShift:
+ return IntType;
+
+ case OpAnd:
+ case OpOr:
+ return BoolType;
+
+ case OpGt:
+ case OpLt:
+ case OpGe:
+ case OpLe:
+ case OpEqual:
+ case OpNotEqual:
+ case OpStrictEqual:
+ case OpStrictNotEqual:
+ return BoolType;
+ } // switch
+
+ return InvalidType;
+}
+
+void Call::dump(QTextStream &out)
+{
+ base->dump(out);
+ out << '(';
+ for (ExprList *it = args; it; it = it->next) {
+ if (it != args)
+ out << ", ";
+ it->expr->dump(out);
+ }
+ out << ')';
+}
+
// Result type of a call expression. Return types are not tracked here,
// so every call is typed invalid (i.e. unknown).
Type Call::typeForFunction(Expr *)
{
    return InvalidType;
}
+
+void New::dump(QTextStream &out)
+{
+ out << "new ";
+ base->dump(out);
+ out << '(';
+ for (ExprList *it = args; it; it = it->next) {
+ if (it != args)
+ out << ", ";
+ it->expr->dump(out);
+ }
+ out << ')';
+}
+
// Result type of a 'new' expression; constructor result types are not
// tracked, so it is always unknown.
Type New::typeForFunction(Expr *)
{
    return InvalidType;
}
+
// Dump as: <base>[<index>]
void Subscript::dump(QTextStream &out)
{
    base->dump(out);
    out << '[';
    index->dump(out);
    out << ']';
}
+
// Dump as: <base>.<name>
void Member::dump(QTextStream &out)
{
    base->dump(out);
    out << '.' << *name;
}
+
// Expression-statement: dump as "(void) <expr>;" to show the value is
// discarded.
void Exp::dump(QTextStream &out, Mode)
{
    out << "(void) ";
    expr->dump(out);
    out << ';';
}
+
// Dump as the pseudo-instruction %enter(<expr>);
void Enter::dump(QTextStream &out, Mode)
{
    out << "%enter(";
    expr->dump(out);
    out << ");";
}
+
+void Leave::dump(QTextStream &out, Mode)
+{
+ out << "%leave";
+ out << ';';
+}
+
// Dump as "<target> <op>= <source>;" — a plain assignment when op is
// OpInvalid, a compound assignment (+=, -=, ...) otherwise.
void Move::dump(QTextStream &out, Mode)
{
    target->dump(out);
    out << ' ';
    if (op != OpInvalid)
        out << opname(op);
    out << "= ";
// if (source->type != target->type)
// out << typeName(source->type) << "_to_" << typeName(target->type) << '(';
    source->dump(out);
// if (source->type != target->type)
// out << ')';
    out << ';';
}
+
+void Jump::dump(QTextStream &out, Mode mode)
+{
+ Q_UNUSED(mode);
+ out << "goto " << 'L' << target << ';';
+}
+
+void CJump::dump(QTextStream &out, Mode mode)
+{
+ Q_UNUSED(mode);
+ out << "if (";
+ cond->dump(out);
+ if (mode == HIR)
+ out << ") goto " << 'L' << iftrue << "; else goto " << 'L' << iffalse << ';';
+ else
+ out << ") goto " << 'L' << iftrue << ";";
+}
+
+void Ret::dump(QTextStream &out, Mode)
+{
+ out << "return";
+ if (expr) {
+ out << ' ';
+ expr->dump(out);
+ }
+ out << ';';
+}
+
+Function *Module::newFunction(const QString &name)
+{
+ Function *f = new Function(this, name);
+ functions.append(f);
+ return f;
+}
+
+Function::~Function()
+{
+ qDeleteAll(basicBlocks);
+}
+
+const QString *Function::newString(const QString &text)
+{
+ return &*strings.insert(text);
+}
+
+BasicBlock *Function::newBasicBlock()
+{
+ return i(new BasicBlock(this));
+}
+
+void Function::dump(QTextStream &out, Stmt::Mode mode)
+{
+ QString n = name ? *name : QString();
+ if (n.isEmpty())
+ n.sprintf("%p", this);
+ out << "function " << n << "() {" << endl;
+ foreach (const QString *formal, formals)
+ out << "\treceive " << *formal << ';' << endl;
+ foreach (const QString *local, locals)
+ out << "\tlocal " << *local << ';' << endl;
+ foreach (BasicBlock *bb, basicBlocks)
+ bb->dump(out, mode);
+ out << '}' << endl;
+}
+
+unsigned BasicBlock::newTemp()
+{
+ return function->tempCount++;
+}
+
+Temp *BasicBlock::TEMP(unsigned index)
+{
+ Temp *e = function->New<Temp>();
+ e->init(IR::InvalidType, index);
+ return e;
+}
+
+Expr *BasicBlock::CONST(Type type, double value)
+{
+ Const *e = function->New<Const>();
+ e->init(type, value);
+ return e;
+}
+
+Expr *BasicBlock::STRING(const QString *value)
+{
+ String *e = function->New<String>();
+ e->init(value);
+ return e;
+}
+
+Name *BasicBlock::NAME(const QString &id, quint32 line, quint32 column)
+{
+ Name *e = function->New<Name>();
+ e->init(InvalidType, function->newString(id), line, column);
+ return e;
+}
+
// Build a Closure expression wrapping the given function.
// NOTE(review): the parameter shadows the BasicBlock::function member,
// so the node is allocated from the *closed-over* function's pool rather
// than this block's owner. Both pools belong to the same module, so this
// looks harmless, but confirm the shadowing is intentional.
Closure *BasicBlock::CLOSURE(Function *function)
{
    Closure *clos = function->New<Closure>();
    clos->init(IR::InvalidType, function);
    return clos;
}
+
+Expr *BasicBlock::UNOP(AluOp op, Expr *expr)
+{
+ Unop *e = function->New<Unop>();
+ e->init(op, expr);
+ return e;
+}
+
// Build a binary-operator expression, constant-folding when both
// operands are constants (and folding string + string concatenation).
// Folding follows JS double semantics: bitwise/shift operators truncate
// to int first, OpAnd/OpOr select an operand value, and comparisons
// produce 0/1. Division by a zero constant yields inf/nan, as in JS.
Expr *BasicBlock::BINOP(AluOp op, Expr *left, Expr *right)
{
    if (left && right) {
        if (Const *c1 = left->asConst()) {
            if (Const *c2 = right->asConst()) {
                // the folded constant keeps the type the Binop would have had
                const IR::Type ty = Binop::typeForOp(op, left, right);

                switch (op) {
                case OpAdd: return CONST(ty, c1->value + c2->value);
                case OpAnd: return CONST(ty, c1->value ? c2->value : 0);
                case OpBitAnd: return CONST(ty, int(c1->value) & int(c2->value));
                case OpBitOr: return CONST(ty, int(c1->value) | int(c2->value));
                case OpBitXor: return CONST(ty, int(c1->value) ^ int(c2->value));
                case OpDiv: return CONST(ty, c1->value / c2->value);
                case OpEqual: return CONST(ty, c1->value == c2->value);
                case OpGe: return CONST(ty, c1->value >= c2->value);
                case OpGt: return CONST(ty, c1->value > c2->value);
                case OpLe: return CONST(ty, c1->value <= c2->value);
                case OpLShift: return CONST(ty, int(c1->value) << int(c2->value));
                case OpLt: return CONST(ty, c1->value < c2->value);
                case OpMod: return CONST(ty, ::fmod(c1->value, c2->value));
                case OpMul: return CONST(ty, c1->value * c2->value);
                case OpNotEqual: return CONST(ty, c1->value != c2->value);
                case OpOr: return CONST(ty, c1->value ? c1->value : c2->value);
                case OpRShift: return CONST(ty, int(c1->value) >> int(c2->value));
                // strict (in)equality on two numeric constants folds the
                // same as the loose form
                case OpStrictEqual: return CONST(ty, c1->value == c2->value);
                case OpStrictNotEqual: return CONST(ty, c1->value != c2->value);
                case OpSub: return CONST(ty, c1->value - c2->value);
                // >>> treats the left operand as unsigned
                case OpURShift: return CONST(ty, unsigned(c1->value) >> int(c2->value));

                case OpIfTrue: // unary ops
                case OpNot:
                case OpUMinus:
                case OpUPlus:
                case OpCompl:
                case OpInvalid:
                    break;
                }
            }
        } else if (op == OpAdd) {
            // fold compile-time string concatenation
            if (String *s1 = left->asString()) {
                if (String *s2 = right->asString()) {
                    return STRING(function->newString(*s1->value + *s2->value));
                }
            }
        }
    }

    // not foldable: build a regular Binop node
    Binop *e = function->New<Binop>();
    e->init(op, left, right);
    return e;
}
+
+Expr *BasicBlock::CALL(Expr *base, ExprList *args)
+{
+ Call *e = function->New<Call>();
+ e->init(base, args);
+ return e;
+}
+
+Expr *BasicBlock::NEW(Expr *base, ExprList *args)
+{
+ New *e = function->New<New>();
+ e->init(base, args);
+ return e;
+}
+
+Expr *BasicBlock::SUBSCRIPT(Expr *base, Expr *index)
+{
+ Subscript *e = function->New<Subscript>();
+ e->init(base, index);
+ return e;
+}
+
+Expr *BasicBlock::MEMBER(Expr *base, const QString *name)
+{
+ Member*e = function->New<Member>();
+ e->init(base, name);
+ return e;
+}
+
+Stmt *BasicBlock::EXP(Expr *expr)
+{
+ Exp *s = function->New<Exp>();
+ s->init(expr);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::ENTER(Expr *expr)
+{
+ Enter *s = function->New<Enter>();
+ s->init(expr);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::LEAVE()
+{
+ Leave *s = function->New<Leave>();
+ s->init();
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::MOVE(Expr *target, Expr *source, AluOp op)
+{
+ Move *s = function->New<Move>();
+ s->init(target, source, op);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::JUMP(BasicBlock *target)
+{
+ if (isTerminated())
+ return 0;
+
+ Jump *s = function->New<Jump>();
+ s->init(target);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::CJUMP(Expr *cond, BasicBlock *iftrue, BasicBlock *iffalse)
+{
+ if (isTerminated())
+ return 0;
+
+ CJump *s = function->New<CJump>();
+ s->init(cond, iftrue, iffalse);
+ statements.append(s);
+ return s;
+}
+
+Stmt *BasicBlock::RET(Expr *expr, Type type)
+{
+ if (isTerminated())
+ return 0;
+
+ Ret *s = function->New<Ret>();
+ s->init(expr, type);
+ statements.append(s);
+ return s;
+}
+
+void BasicBlock::dump(QTextStream &out, Stmt::Mode mode)
+{
+ out << 'L' << this << ':' << endl;
+ foreach (Stmt *s, statements) {
+ out << '\t';
+ s->dump(out, mode);
+ out << endl;
+ }
+}
+
+} // end of namespace IR
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
--- /dev/null
+/****************************************************************************
+**
+** Copyright (C) 2012 Nokia Corporation and/or its subsidiary(-ies).
+** Contact: http://www.qt-project.org/
+**
+** This file is part of the QtQml module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** GNU Lesser General Public License Usage
+** This file may be used under the terms of the GNU Lesser General Public
+** License version 2.1 as published by the Free Software Foundation and
+** appearing in the file LICENSE.LGPL included in the packaging of this
+** file. Please review the following information to ensure the GNU Lesser
+** General Public License version 2.1 requirements will be met:
+** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU General
+** Public License version 3.0 as published by the Free Software Foundation
+** and appearing in the file LICENSE.GPL included in the packaging of this
+** file. Please review the following information to ensure the GNU General
+** Public License version 3.0 requirements will be met:
+** http://www.gnu.org/copyleft/gpl.html.
+**
+** Other Usage
+** Alternatively, this file may be used in accordance with the terms and
+** conditions contained in a signed written agreement between you and Nokia.
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QV4IR_P_H
+#define QV4IR_P_H
+
+//
+// W A R N I N G
+// -------------
+//
+// This file is not part of the Qt API. It exists purely as an
+// implementation detail. This header file may change from version to
+// version without notice, or even be removed.
+//
+// We mean it.
+//
+
+#include <private/qqmljsmemorypool_p.h>
+
+#include <QtCore/QVector>
+#include <QtCore/QString>
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+class QTextStream;
+class QQmlType;
+
+namespace QQmlJS {
+
+namespace IR {
+
+struct BasicBlock;
+struct Function;
+struct Module;
+
+struct Stmt;
+struct Expr;
+
+// expressions
+struct Const;
+struct String;
+struct Name;
+struct Temp;
+struct Closure;
+struct Unop;
+struct Binop;
+struct Call;
+struct New;
+struct Subscript;
+struct Member;
+
+// statements
+struct Exp;
+struct Enter;
+struct Leave;
+struct Move;
+struct Jump;
+struct CJump;
+struct Ret;
+
// Operator kinds shared by Unop and Binop IR expressions. The groups
// below (unary, bitwise, arithmetic, shifts, comparisons, logical)
// mirror the grouping used by opname() and Binop::typeForOp().
enum AluOp {
    OpInvalid = 0,

    OpIfTrue,
    OpNot,
    OpUMinus,
    OpUPlus,
    OpCompl,

    OpBitAnd,
    OpBitOr,
    OpBitXor,

    OpAdd,
    OpSub,
    OpMul,
    OpDiv,
    OpMod,

    OpLShift,
    OpRShift,
    OpURShift,

    OpGt,
    OpLt,
    OpGe,
    OpLe,
    OpEqual,
    OpNotEqual,
    OpStrictEqual,
    OpStrictNotEqual,

    OpAnd,
    OpOr
};
// Maps a parser operator code (QSOperator::Op) to the matching AluOp.
AluOp binaryOperator(int op);
// Printable spelling of an operator, for IR dumps.
const char *opname(IR::AluOp op);
+
// Static types tracked by the IR. Ordering matters: every enumerator
// from FirstNumberType onwards is numeric, and maxType() promotes a
// pair of numeric types with qMax over this ordering.
enum Type {
    InvalidType,
    UndefinedType,
    NullType,
    VoidType,
    StringType,
    UrlType,
    ColorType,
    SGAnchorLineType,
    AttachType,
    ObjectType,
    VariantType,
    VarType,

    FirstNumberType,
    BoolType = FirstNumberType,
    IntType,
    FloatType,
    NumberType
};
// Widest common type of two operand types (see qv4ir.cpp for the rules).
Type maxType(IR::Type left, IR::Type right);
// True for the floating-point types (float/number).
bool isRealType(IR::Type type);
// Human-readable type name, for IR dumps.
const char *typeName(IR::Type t);
+
// Visitor over expression nodes. All callbacks default to no-ops, so a
// subclass overrides only the node kinds it cares about.
struct ExprVisitor {
    virtual ~ExprVisitor() {}
    virtual void visitConst(Const *) {}
    virtual void visitString(String *) {}
    virtual void visitName(Name *) {}
    virtual void visitTemp(Temp *) {}
    virtual void visitClosure(Closure *) {}
    virtual void visitUnop(Unop *) {}
    virtual void visitBinop(Binop *) {}
    virtual void visitCall(Call *) {}
    virtual void visitNew(New *) {}
    virtual void visitSubscript(Subscript *) {}
    virtual void visitMember(Member *) {}
};
+
// Visitor over statement nodes; same no-op-default pattern as ExprVisitor.
struct StmtVisitor {
    virtual ~StmtVisitor() {}
    virtual void visitExp(Exp *) {}
    virtual void visitEnter(Enter *) {}
    virtual void visitLeave(Leave *) {}
    virtual void visitMove(Move *) {}
    virtual void visitJump(Jump *) {}
    virtual void visitCJump(CJump *) {}
    virtual void visitRet(Ret *) {}
};
+
// Base class of all IR expressions. The as*() methods return 0 by
// default and are overridden by the matching subclass, giving a cheap
// RTTI-free downcast.
struct Expr {
    Type type; // static type inferred at construction (InvalidType = unknown)

    Expr(): type(InvalidType) {}
    virtual ~Expr() {}
    virtual void accept(ExprVisitor *) = 0;
    virtual Const *asConst() { return 0; }
    virtual String *asString() { return 0; }
    virtual Name *asName() { return 0; }
    virtual Temp *asTemp() { return 0; }
    virtual Closure *asClosure() { return 0; }
    virtual Unop *asUnop() { return 0; }
    virtual Binop *asBinop() { return 0; }
    virtual Call *asCall() { return 0; }
    virtual New *asNew() { return 0; }
    virtual Subscript *asSubscript() { return 0; }
    virtual Member *asMember() { return 0; }
    virtual void dump(QTextStream &out) = 0;
};
+
// Singly-linked list of expressions, used for call/new argument lists.
struct ExprList {
    Expr *expr;
    ExprList *next; // 0 terminates the list

    void init(Expr *expr, ExprList *next = 0)
    {
        this->expr = expr;
        this->next = next;
    }
};
+
// Numeric/undefined/null/bool constant; the value is always stored as a
// double regardless of the declared type.
struct Const: Expr {
    double value;

    void init(Type type, double value)
    {
        this->type = type;
        this->value = value;
    }

    virtual void accept(ExprVisitor *v) { v->visitConst(this); }
    virtual Const *asConst() { return this; }

    virtual void dump(QTextStream &out);
};
+
// String literal; `value` points at an interned string owned elsewhere
// (see Function::newString).
struct String: Expr {
    const QString *value;

    void init(const QString *value)
    {
        this->type = StringType;
        this->value = value;
    }

    virtual void accept(ExprVisitor *v) { v->visitString(this); }
    virtual String *asString() { return this; }

    virtual void dump(QTextStream &out);
    // Backslash-escapes s for printing inside a double-quoted literal.
    static QString escape(const QString &s);
};
+
// Reference to an identifier, with its source position for diagnostics.
struct Name: Expr {
    const QString *id; // interned identifier text
    quint32 line;
    quint32 column;

    void init(Type type, const QString *id, quint32 line, quint32 column);

    virtual void accept(ExprVisitor *v) { v->visitName(this); }
    virtual Name *asName() { return this; }

    virtual void dump(QTextStream &out);
};
+
// Compiler-generated temporary, numbered per function (see
// BasicBlock::newTemp); dumps as %<index>.
struct Temp: Expr {
    unsigned index;

    void init(Type type, unsigned index)
    {
        this->type = type;
        this->index = index;
    }

    virtual void accept(ExprVisitor *v) { v->visitTemp(this); }
    virtual Temp *asTemp() { return this; }

    virtual void dump(QTextStream &out);
};
+
// Expression referring to a nested IR function (a closure value).
struct Closure: Expr {
    Function *value; // the function being closed over

    void init(Type type, Function *value)
    {
        this->type = type;
        this->value = value;
    }

    virtual void accept(ExprVisitor *v) { v->visitClosure(this); }
    virtual Closure *asClosure() { return this; }

    virtual void dump(QTextStream &out);
};
+
// Unary operator application; init() infers the result type from the
// operator and operand.
struct Unop: Expr {
    AluOp op;
    Expr *expr;

    void init(AluOp op, Expr *expr)
    {
        this->type = this->typeForOp(op, expr);
        this->op = op;
        this->expr = expr;
    }

    virtual void accept(ExprVisitor *v) { v->visitUnop(this); }
    virtual Unop *asUnop() { return this; }

    virtual void dump(QTextStream &out);

private:
    static Type typeForOp(AluOp op, Expr *expr);
};
+
// Binary operator application; init() infers the result type from the
// operator and the two operands (typeForOp is public so constant
// folding in BasicBlock::BINOP can reuse it).
struct Binop: Expr {
    AluOp op;
    Expr *left;
    Expr *right;

    void init(AluOp op, Expr *left, Expr *right)
    {
        this->type = typeForOp(op, left, right);
        this->op = op;
        this->left = left;
        this->right = right;
    }

    virtual void accept(ExprVisitor *v) { v->visitBinop(this); }
    virtual Binop *asBinop() { return this; }

    virtual void dump(QTextStream &out);

    static Type typeForOp(AluOp op, Expr *left, Expr *right);
};
+
// Function-call expression: base(args...).
struct Call: Expr {
    Expr *base;
    ExprList *args; // 0 for a call with no arguments

    void init(Expr *base, ExprList *args)
    {
        this->type = typeForFunction(base);
        this->base = base;
        this->args = args;
    }

    // The sole argument if there is exactly one, otherwise 0.
    Expr *onlyArgument() const {
        if (args && ! args->next)
            return args->expr;
        return 0;
    }

    virtual void accept(ExprVisitor *v) { v->visitCall(this); }
    virtual Call *asCall() { return this; }

    virtual void dump(QTextStream &out);

private:
    static Type typeForFunction(Expr *base);
};
+
// Constructor-call expression: new base(args...).
struct New: Expr {
    Expr *base;
    ExprList *args; // 0 for a construction with no arguments

    void init(Expr *base, ExprList *args)
    {
        this->type = typeForFunction(base);
        this->base = base;
        this->args = args;
    }

    // The sole argument if there is exactly one, otherwise 0.
    Expr *onlyArgument() const {
        if (args && ! args->next)
            return args->expr;
        return 0;
    }

    virtual void accept(ExprVisitor *v) { v->visitNew(this); }
    virtual New *asNew() { return this; }

    virtual void dump(QTextStream &out);

private:
    static Type typeForFunction(Expr *base);
};
+
// Indexed access: base[index]. The result type is always unknown.
struct Subscript: Expr {
    Expr *base;
    Expr *index;

    void init(Expr *base, Expr *index)
    {
        this->type = typeForFunction(base);
        this->base = base;
        this->index = index;
    }

    virtual void accept(ExprVisitor *v) { v->visitSubscript(this); }
    virtual Subscript *asSubscript() { return this; }

    virtual void dump(QTextStream &out);

private:
    static Type typeForFunction(Expr *) { return IR::InvalidType; }
};
+
// Member access: base.name. The result type is always unknown.
struct Member: Expr {
    Expr *base;
    const QString *name; // interned member name

    void init(Expr *base, const QString *name)
    {
        this->type = typeForFunction(base);
        this->base = base;
        this->name = name;
    }

    virtual void accept(ExprVisitor *v) { v->visitMember(this); }
    virtual Member *asMember() { return this; }

    virtual void dump(QTextStream &out);

private:
    static Type typeForFunction(Expr *) { return IR::InvalidType; }
};
+
// Base class of all IR statements. asTerminator() returns non-null for
// statements that end a basic block (Jump/CJump/Ret).
struct Stmt {
    enum Mode {
        HIR, // high-level dump: both CJump successors shown
        MIR  // mid-level dump: fall-through successor implicit
    };

    // Dataflow sets — presumably filled in by a later liveness/analysis
    // pass; nothing in this file computes them (confirm with callers).
    QVector<unsigned> uses;
    QVector<unsigned> defs;
    QVector<unsigned> liveIn;
    QVector<unsigned> liveOut;

    virtual ~Stmt() {}
    virtual Stmt *asTerminator() { return 0; }

    virtual void accept(StmtVisitor *) = 0;
    virtual Exp *asExp() { return 0; }
    virtual Move *asMove() { return 0; }
    virtual Enter *asEnter() { return 0; }
    virtual Leave *asLeave() { return 0; }
    virtual Jump *asJump() { return 0; }
    virtual CJump *asCJump() { return 0; }
    virtual Ret *asRet() { return 0; }
    virtual void dump(QTextStream &out, Mode mode = HIR) = 0;
};
+
// Expression statement: evaluate `expr` and discard the result.
struct Exp: Stmt {
    Expr *expr;

    void init(Expr *expr)
    {
        this->expr = expr;
    }

    virtual void accept(StmtVisitor *v) { v->visitExp(this); }
    virtual Exp *asExp() { return this; }

    virtual void dump(QTextStream &out, Mode);
};
+
// Assignment statement: target = source, or a compound assignment
// (target op= source) when op is not OpInvalid.
struct Move: Stmt {
    Expr *target;
    Expr *source;
    AluOp op; // OpInvalid means a plain assignment

    void init(Expr *target, Expr *source, AluOp op)
    {
        this->target = target;
        this->source = source;
        this->op = op;
    }

    virtual void accept(StmtVisitor *v) { v->visitMove(this); }
    virtual Move *asMove() { return this; }

    virtual void dump(QTextStream &out, Mode);
};
+
// Pseudo-statement pairing with Leave; dumps as %enter(expr). Its exact
// runtime semantics are defined by the consumer of the IR, not here.
struct Enter: Stmt {
    Expr *expr;

    void init(Expr *expr)
    {
        this->expr = expr;
    }

    virtual void accept(StmtVisitor *v) { v->visitEnter(this); }
    virtual Enter *asEnter() { return this; }

    virtual void dump(QTextStream &out, Mode);
};
+
// Pseudo-statement closing an Enter; carries no data.
struct Leave: Stmt {
    void init() {}

    virtual void accept(StmtVisitor *v) { v->visitLeave(this); }
    virtual Leave *asLeave() { return this; }

    virtual void dump(QTextStream &out, Mode);
};
+
// Unconditional branch to another basic block; a block terminator.
struct Jump: Stmt {
    BasicBlock *target;

    void init(BasicBlock *target)
    {
        this->target = target;
    }

    virtual Stmt *asTerminator() { return this; }

    virtual void accept(StmtVisitor *v) { v->visitJump(this); }
    virtual Jump *asJump() { return this; }

    virtual void dump(QTextStream &out, Mode mode);
};
+
// Conditional branch: goto iftrue when cond holds, iffalse otherwise;
// a block terminator.
struct CJump: Stmt {
    Expr *cond;
    BasicBlock *iftrue;
    BasicBlock *iffalse;

    void init(Expr *cond, BasicBlock *iftrue, BasicBlock *iffalse)
    {
        this->cond = cond;
        this->iftrue = iftrue;
        this->iffalse = iffalse;
    }

    virtual Stmt *asTerminator() { return this; }

    virtual void accept(StmtVisitor *v) { v->visitCJump(this); }
    virtual CJump *asCJump() { return this; }

    virtual void dump(QTextStream &out, Mode mode);
};
+
// Return statement; expr may be 0 for a bare return. A block terminator.
struct Ret: Stmt {
    Expr *expr;
    Type type; // declared type of the returned value

    void init(Expr *expr, Type type)
    {
        this->expr = expr;
        this->type = type;
    }

    virtual Stmt *asTerminator() { return this; }

    virtual void accept(StmtVisitor *v) { v->visitRet(this); }
    virtual Ret *asRet() { return this; }

    virtual void dump(QTextStream &out, Mode);
};
+
// Top-level IR container: owns the memory pool all nodes are allocated
// from and the list of functions.
// NOTE(review): Module has no destructor deleting `functions`, so
// Function objects are presumably freed by whoever owns the Module —
// confirm ownership with callers.
struct Module {
    MemoryPool pool;
    QVector<Function *> functions;

    Function *newFunction(const QString &name);
};
+
// A single IR function: its basic blocks, interned strings, and the
// names of its formal parameters and locals. Nodes are pool-allocated
// via New<>() and never individually freed; the destructor deletes only
// the basic blocks.
struct Function {
    Module *module;
    MemoryPool *pool; // the owning module's pool
    const QString *name; // interned; may point at an empty string
    QVector<BasicBlock *> basicBlocks;
    int tempCount; // next free temporary index (see BasicBlock::newTemp)
    QSet<QString> strings; // interning set; newString() hands out stable pointers
    QList<const QString *> formals;
    QList<const QString *> locals;

    // Placement-allocate a node from the pool; _Tp must be trivially
    // destructible (the pool never runs destructors).
    template <typename _Tp> _Tp *New() { return new (pool->allocate(sizeof(_Tp))) _Tp(); }

    Function(Module *module, const QString &name)
        : module(module), pool(&module->pool), tempCount(0) { this->name = newString(name); }

    ~Function();

    BasicBlock *newBasicBlock();
    const QString *newString(const QString &text);

    void RECEIVE(const QString &name) { formals.append(newString(name)); }
    void LOCAL(const QString &name) { locals.append(newString(name)); }

    // Register a block with this function and return it.
    inline BasicBlock *i(BasicBlock *block) { basicBlocks.append(block); return block; }

    void dump(QTextStream &out, Stmt::Mode mode = Stmt::HIR);
};
+
+// A straight-line sequence of statements ending (once complete) in a
+// terminator (Jump/CJump/Ret). Also records CFG edges and liveness sets.
+struct BasicBlock {
+    Function *function;            // owning function
+    QVector<Stmt *> statements;    // statements in execution order
+    QVector<BasicBlock *> in;      // CFG predecessors
+    QVector<BasicBlock *> out;     // CFG successors
+    QVector<unsigned> liveIn;      // temps live on entry — presumably temp indices; confirm
+    QVector<unsigned> liveOut;     // temps live on exit — presumably temp indices; confirm
+    int index;                     // position in Function::basicBlocks layout order (-1 until assigned)
+    int offset;                    // NOTE(review): looks like a code/byte offset set by a backend — confirm
+
+    BasicBlock(Function *function): function(function), index(-1), offset(-1) {}
+    ~BasicBlock() {}
+
+    // Append a statement and return it (fluent helper used by the factories below).
+    template <typename Instr> inline Instr i(Instr i) { statements.append(i); return i; }
+
+    inline bool isEmpty() const {
+        return statements.isEmpty();
+    }
+
+    // Returns the trailing terminator statement, or 0 if the block is empty
+    // or its last statement is not a terminator.
+    inline Stmt *terminator() const {
+        if (! statements.isEmpty() && statements.at(statements.size() - 1)->asTerminator() != 0)
+            return statements.at(statements.size() - 1);
+        return 0;
+    }
+
+    inline bool isTerminated() const {
+        if (terminator() != 0)
+            return true;
+        return false;
+    }
+
+    // Allocate a fresh virtual register index for this function.
+    unsigned newTemp();
+
+    // Factory methods: each builds an IR node in the module pool. The
+    // upper-case statement factories also append the node to this block.
+    Temp *TEMP(unsigned index);
+
+    Expr *CONST(Type type, double value);
+    Expr *STRING(const QString *value);
+
+    Name *NAME(const QString &id, quint32 line, quint32 column);
+
+    Closure *CLOSURE(Function *function);
+
+    Expr *UNOP(AluOp op, Expr *expr);
+    Expr *BINOP(AluOp op, Expr *left, Expr *right);
+    Expr *CALL(Expr *base, ExprList *args = 0);
+    Expr *NEW(Expr *base, ExprList *args = 0);
+    Expr *SUBSCRIPT(Expr *base, Expr *index);
+    Expr *MEMBER(Expr *base, const QString *name);
+
+    Stmt *EXP(Expr *expr);
+    Stmt *ENTER(Expr *expr);
+    Stmt *LEAVE();
+
+    Stmt *MOVE(Expr *target, Expr *source, AluOp op = IR::OpInvalid);
+
+    Stmt *JUMP(BasicBlock *target);
+    Stmt *CJUMP(Expr *cond, BasicBlock *iftrue, BasicBlock *iffalse);
+    Stmt *RET(Expr *expr, Type type);
+
+    void dump(QTextStream &out, Stmt::Mode mode = Stmt::HIR);
+};
+
+} // end of namespace IR
+
+} // end of namespace QQmlJS
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QV4IR_P_H
--- /dev/null
+
+#include "qv4isel_p.h"
+#include "qmljs_runtime.h"
+#include "qmljs_objects.h"
+
+// Minimal glib-style typedef shims mapped onto Qt types, so the Mono
+// code-generation headers (amd64-codegen.h / x86-codegen.h) can be included
+// without a glib dependency.
+#define TARGET_AMD64
+typedef quint64 guint64;
+typedef qint64 gint64;
+typedef uchar guint8;
+typedef uint guint32;
+typedef void *gpointer;
+#include "amd64-codegen.h"
+
+#include <sys/mman.h>
+
+#ifndef NO_UDIS86
+# include <udis86.h>
+#endif
+
+using namespace QQmlJS;
+
+// Make the pages covering [addr, addr + size) readable, writable and
+// executable, so freshly generated machine code can be run in place.
+// mprotect() requires a page-aligned start address, so the range is
+// extended downwards to the containing page boundary. Returns true on
+// success.
+static inline bool protect(const void *addr, size_t size)
+{
+    const size_t pageSize = sysconf(_SC_PAGESIZE);
+    const size_t start = reinterpret_cast<size_t>(addr);
+    const size_t pageStart = start & ~(pageSize - static_cast<size_t>(1));
+    const size_t span = size + (start - pageStart);
+    return mprotect(reinterpret_cast<void *>(pageStart), span,
+                    PROT_READ | PROT_WRITE | PROT_EXEC) == 0;
+}
+
+/*
+ * Patch the operand of a previously emitted instruction at `code` so it
+ * refers to `target`. Borrowed from Mono: it recognizes the instruction
+ * shapes the emitter produces by inspecting opcode bytes and falls back
+ * to the generic x86 patcher for everything else (jumps, branches).
+ */
+static inline void
+amd64_patch (unsigned char* code, gpointer target)
+{
+    guint8 rex = 0;
+
+#ifdef __native_client_codegen__
+    code = amd64_skip_nops (code);
+#endif
+#if defined(__native_client_codegen__) && defined(__native_client__)
+    if (nacl_is_code_address (code)) {
+        /* For tail calls, code is patched after being installed */
+        /* but not through the normal "patch callsite" method. */
+        unsigned char buf[kNaClAlignment];
+        unsigned char *aligned_code = (uintptr_t)code & ~kNaClAlignmentMask;
+        int ret;
+        memcpy (buf, aligned_code, kNaClAlignment);
+        /* Patch a temp buffer of bundle size, */
+        /* then install to actual location. */
+        amd64_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), target);
+        ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+        g_assert (ret == 0);
+        return;
+    }
+    target = nacl_modify_patch_target (target);
+#endif
+
+    /* Skip REX */
+    if ((code [0] >= 0x40) && (code [0] <= 0x4f)) {
+        rex = code [0];
+        code += 1;
+    }
+
+    if ((code [0] & 0xf8) == 0xb8) {
+        /* amd64_set_reg_template: mov reg, imm64 — overwrite the 8-byte immediate */
+        *(guint64*)(code + 1) = (guint64)target;
+    }
+    else if ((code [0] == 0x8b) && rex && x86_modrm_mod (code [1]) == 0 && x86_modrm_rm (code [1]) == 5) {
+        /* mov 0(%rip), %dreg */
+        /* NOTE(review): the rel32 is the absolute target truncated to 32 bits
+         * minus the 7-byte instruction length — Mono relies on low-4GB
+         * code/data placement here; confirm that assumption holds. */
+        *(guint32*)(code + 2) = (guint32)(guint64)target - 7;
+    }
+    else if ((code [0] == 0xff) && (code [1] == 0x15)) {
+        /* call *<OFFSET>(%rip) — same low-4GB assumption as above */
+        *(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
+    }
+    else if (code [0] == 0xe8) {
+        /* call <DISP>: target must be reachable with a signed 32-bit displacement */
+        gint64 disp = (guint8*)target - (guint8*)code;
+        assert (amd64_is_imm32 (disp));
+        x86_patch (code, (unsigned char*)target);
+    }
+    else
+        x86_patch (code, (unsigned char*)target);
+}
+// Construct an instruction selector for `module`. The per-function state
+// (_function, _block, _code, _codePtr) stays null until visitFunction().
+InstructionSelection::InstructionSelection(IR::Module *module)
+    : _module(module)
+    , _function(0)
+    , _block(0)
+    , _code(0)
+    , _codePtr(0)
+{
+}
+
+// NOTE(review): does not free the code pages allocated in visitFunction()
+// nor the interned String objects (those are GC-allocated).
+InstructionSelection::~InstructionSelection()
+{
+}
+
+// Translate one IR function into amd64 machine code in a freshly allocated
+// page, back-patch all recorded branch sites, optionally disassemble the
+// result (SHOW_CODE env var), then immediately execute it against a fresh
+// Context and print the global "d" — early bring-up/debugging behaviour.
+void InstructionSelection::visitFunction(IR::Function *function)
+{
+    // NOTE(review): malloc() is not guaranteed page-aligned and the assert
+    // below checks only 16-byte alignment; protect() rounds down to a page
+    // boundary so this usually works, but mmap() would be correct. Also:
+    // protect()'s return value is ignored, nothing guards against emitting
+    // more than one page of code, and the buffer is never freed.
+    uchar *code = (uchar *) malloc(getpagesize());
+    Q_ASSERT(! (size_t(code) & 15));
+
+    protect(code, getpagesize());
+
+    uchar *codePtr = code;
+
+    qSwap(_code, code);
+    qSwap(_codePtr, codePtr);
+
+    // Stack space for the function's temporaries, kept 16-byte aligned
+    // as required by the SysV AMD64 ABI at call sites.
+    int locals = function->tempCount * sizeof(Value);
+    locals = (locals + 15) & ~15;
+
+    // Prologue: preserve %r14 and cache the Context* argument (%rdi) in it
+    // for the whole function, then reserve the locals area.
+    amd64_push_reg(_codePtr, AMD64_R14);
+    amd64_mov_reg_reg(_codePtr, AMD64_R14, AMD64_RDI, 8);
+    amd64_alu_reg_imm(_codePtr, X86_SUB, AMD64_RSP, locals);
+
+    // Emit every block in layout order, remembering each block's start
+    // address so branches to it can be patched afterwards.
+    foreach (IR::BasicBlock *block, function->basicBlocks) {
+        _block = block;
+        _addrs[block] = _codePtr;
+        foreach (IR::Stmt *s, block->statements) {
+            s->accept(this);
+        }
+    }
+
+    // Fix-up pass: resolve every branch site recorded by visitJump/visitCJump.
+    QHashIterator<IR::BasicBlock *, QVector<uchar *> > it(_patches);
+    while (it.hasNext()) {
+        it.next();
+        uchar *target = _addrs[it.key()];
+        foreach (uchar *instr, it.value()) {
+            amd64_patch(instr, target);
+        }
+    }
+
+    // Epilogue: release locals, restore %r14, return.
+    amd64_alu_reg_imm(_codePtr, X86_ADD, AMD64_RSP, locals);
+    amd64_pop_reg(_codePtr, AMD64_R14);
+    amd64_ret(_codePtr);
+
+    qSwap(_codePtr, codePtr);
+    qSwap(_code, code);
+
+    static bool showCode = !qgetenv("SHOW_CODE").isNull();
+    if (showCode) {
+#ifndef NO_UDIS86
+        ud_t ud_obj;
+
+        ud_init(&ud_obj);
+        ud_set_input_buffer(&ud_obj, code, codePtr - code);
+        ud_set_mode(&ud_obj, 64);
+        ud_set_syntax(&ud_obj, UD_SYN_INTEL);
+
+        while (ud_disassemble(&ud_obj)) {
+            printf("\t%s\n", ud_insn_asm(&ud_obj));
+        }
+#endif
+    }
+
+    // Run the freshly generated code and dump the value of the global "d".
+    void (*f)(Context *) = (void (*)(Context *)) code;
+
+    Context *ctx = new (GC) Context;
+    ctx->activation = Value::object(ctx, new (GC) ArgumentsObject);
+    f(ctx);
+    Value d;
+    ctx->activation.objectValue->get(String::get(ctx, QLatin1String("d")), &d);
+    __qmljs_to_string(ctx, &d, &d);
+    qDebug() << qPrintable(d.stringValue->text());
+}
+
+// Intern `s`: return the single GC-allocated String object used for this
+// identifier throughout the generated code, creating it on first use.
+String *InstructionSelection::identifier(const QString &s)
+{
+    QHash<QString, String *>::iterator it = _identifiers.find(s);
+    if (it == _identifiers.end())
+        it = _identifiers.insert(s, new (GC) String(s));
+    return it.value();
+}
+
+// Expression statements (e.g. a bare call) are not implemented yet; the
+// commented-out sketch shows the intended dispatch on IR::Call.
+void InstructionSelection::visitExp(IR::Exp *s)
+{
+    // if (IR::Call *c = s->expr->asCall()) {
+    // return;
+    // }
+    Q_ASSERT(!"TODO");
+}
+
+// Scope-entry statements are not implemented yet.
+void InstructionSelection::visitEnter(IR::Enter *)
+{
+    Q_ASSERT(!"TODO");
+}
+
+// Scope-exit statements are not implemented yet.
+void InstructionSelection::visitLeave(IR::Leave *)
+{
+    Q_ASSERT(!"TODO");
+}
+
+// Emit code for an assignment. Only plain moves (op == OpInvalid) between
+// the source/target combinations handled below are implemented; anything
+// else falls through to the TODO assert. Conventions used throughout:
+// %r14 caches the Context* (set up in visitFunction's prologue); runtime
+// helpers take arguments in %rdi/%rsi/%rdx/%rcx per the SysV AMD64 ABI;
+// a temporary lives on the stack at 8 + sizeof(Value) * (index - 1) from
+// %rsp (temp indices appear to be 1-based — TODO confirm).
+void InstructionSelection::visitMove(IR::Move *s)
+{
+    // %rdi, %rsi, %rdx, %rcx, %r8 and %r9
+    if (s->op == IR::OpInvalid) {
+        if (IR::Name *n = s->target->asName()) {
+            String *propertyName = identifier(*n->id);
+
+            if (IR::Const *c = s->source->asConst()) {
+                // name = constant: __qmljs_set_activation_property_number(ctx, name, xmm0)
+                amd64_mov_reg_reg(_codePtr, AMD64_RDI, AMD64_R14, 8);
+                amd64_mov_reg_imm(_codePtr, AMD64_RSI, propertyName);
+                // Load the double through %rax since there is no movsd imm form.
+                // NOTE(review): this embeds &c->value, so the IR must outlive
+                // the generated code — confirm.
+                amd64_mov_reg_imm(_codePtr, AMD64_RAX, &c->value);
+                amd64_movsd_reg_regp(_codePtr, X86_XMM0, AMD64_RAX);
+                amd64_call_code(_codePtr, __qmljs_set_activation_property_number);
+                return;
+            } else if (IR::Temp *t = s->source->asTemp()) {
+                // name = temp: pass the temp's stack slot by address.
+                amd64_mov_reg_reg(_codePtr, AMD64_RDI, AMD64_R14, 8);
+                amd64_mov_reg_imm(_codePtr, AMD64_RSI, propertyName);
+                amd64_lea_membase(_codePtr, AMD64_RDX, AMD64_RSP, 8 + sizeof(Value) * (t->index - 1));
+                amd64_call_code(_codePtr, __qmljs_set_activation_property);
+                return;
+            }
+        } else if (IR::Temp *t = s->target->asTemp()) {
+            const int offset = 8 + sizeof(Value) * (t->index - 1);
+            if (IR::Name *n = s->source->asName()) {
+                // temp = name: read an activation property into the temp's slot.
+                String *propertyName = identifier(*n->id);
+                amd64_mov_reg_reg(_codePtr, AMD64_RDI, AMD64_R14, 8);
+                amd64_lea_membase(_codePtr, AMD64_RSI, AMD64_RSP, offset);
+                amd64_mov_reg_imm(_codePtr, AMD64_RDX, propertyName);
+                amd64_call_code(_codePtr, __qmljs_get_activation_property);
+                return;
+            } else if (IR::Const *c = s->source->asConst()) {
+                // temp = constant: initialize the slot as a number Value.
+                amd64_mov_reg_reg(_codePtr, AMD64_RDI, AMD64_R14, 8);
+                amd64_lea_membase(_codePtr, AMD64_RSI, AMD64_RSP, offset);
+                amd64_mov_reg_imm(_codePtr, AMD64_RAX, &c->value);
+                amd64_movsd_reg_regp(_codePtr, X86_XMM0, AMD64_RAX);
+                amd64_call_code(_codePtr, __qmljs_init_number);
+                return;
+            } else if (IR::Binop *b = s->source->asBinop()) {
+                // temp = temp OP temp: dispatch to the matching runtime helper
+                // with (ctx, result*, left*, right*).
+                IR::Temp *l = b->left->asTemp();
+                IR::Temp *r = b->right->asTemp();
+                if (l && r) {
+                    amd64_mov_reg_reg(_codePtr, AMD64_RDI, AMD64_R14, 8);
+                    amd64_lea_membase(_codePtr, AMD64_RSI, AMD64_RSP, offset);
+                    amd64_lea_membase(_codePtr, AMD64_RDX, AMD64_RSP, 8 + sizeof(Value) * (l->index - 1));
+                    amd64_lea_membase(_codePtr, AMD64_RCX, AMD64_RSP, 8 + sizeof(Value) * (r->index - 1));
+
+                    // NOTE(review): if op stays 0 (an "unreachable" case is hit
+                    // with asserts disabled) amd64_call_code emits a call to
+                    // address 0 — consider bailing out instead.
+                    void (*op)(Context *, Value *, const Value *, const Value *) = 0;
+
+                    switch (b->op) {
+                    case QQmlJS::IR::OpInvalid:
+                    case QQmlJS::IR::OpIfTrue:
+                    case QQmlJS::IR::OpNot:
+                    case QQmlJS::IR::OpUMinus:
+                    case QQmlJS::IR::OpUPlus:
+                    case QQmlJS::IR::OpCompl:
+                        Q_ASSERT(!"unreachable");
+                        break;
+
+                    case QQmlJS::IR::OpBitAnd: op = __qmljs_bit_and; break;
+                    case QQmlJS::IR::OpBitOr: op = __qmljs_bit_or; break;
+                    case QQmlJS::IR::OpBitXor: op = __qmljs_bit_xor; break;
+                    case QQmlJS::IR::OpAdd: op = __qmljs_add; break;
+                    case QQmlJS::IR::OpSub: op = __qmljs_sub; break;
+                    case QQmlJS::IR::OpMul: op = __qmljs_mul; break;
+                    case QQmlJS::IR::OpDiv: op = __qmljs_div; break;
+                    case QQmlJS::IR::OpMod: op = __qmljs_mod; break;
+                    case QQmlJS::IR::OpLShift: op = __qmljs_shl; break;
+                    case QQmlJS::IR::OpRShift: op = __qmljs_shr; break;
+                    case QQmlJS::IR::OpURShift: op = __qmljs_ushr; break;
+                    case QQmlJS::IR::OpGt: op = __qmljs_gt; break;
+                    case QQmlJS::IR::OpLt: op = __qmljs_lt; break;
+                    case QQmlJS::IR::OpGe: op = __qmljs_ge; break;
+                    case QQmlJS::IR::OpLe: op = __qmljs_le; break;
+                    case QQmlJS::IR::OpEqual: op = __qmljs_eq; break;
+                    case QQmlJS::IR::OpNotEqual: op = __qmljs_ne; break;
+                    case QQmlJS::IR::OpStrictEqual: op = __qmljs_se; break;
+                    case QQmlJS::IR::OpStrictNotEqual: op = __qmljs_sne; break;
+
+                    case QQmlJS::IR::OpAnd:
+                    case QQmlJS::IR::OpOr:
+                        Q_ASSERT(!"unreachable");
+                        break;
+                    }
+                    amd64_call_code(_codePtr, op);
+                    return;
+                }
+            }
+        }
+    } else {
+        // inplace assignment, e.g. x += 1, ++x, ...
+    }
+    Q_ASSERT(!"TODO");
+}
+
+// Emit an unconditional jump. A jump to the block that immediately follows
+// in layout order is a no-op (fall-through); otherwise emit a jmp rel32
+// with a zero placeholder and record the site so the fix-up pass in
+// visitFunction() can patch in the real target address.
+void InstructionSelection::visitJump(IR::Jump *s)
+{
+    if (_block->index + 1 == s->target->index)
+        return;
+
+    _patches[s->target].append(_codePtr);
+    amd64_jump32(_codePtr, 0);
+}
+
+// Emit a conditional branch: convert the condition temp to a boolean via
+// the runtime, test the 32-bit result in %eax, and branch to the true
+// block on non-zero. Both branch sites use zero placeholders and are
+// recorded in _patches for the later fix-up pass; the false edge is
+// omitted when it falls through to the next block in layout order.
+void InstructionSelection::visitCJump(IR::CJump *s)
+{
+    if (IR::Temp *t = s->cond->asTemp()) {
+        amd64_mov_reg_reg(_codePtr, AMD64_RDI, AMD64_R14, 8);
+        amd64_lea_membase(_codePtr, AMD64_RSI, AMD64_RSP, 8 + sizeof(Value) * (t->index - 1));
+        amd64_call_code(_codePtr, __qmljs_to_boolean);
+        amd64_alu_reg_imm_size(_codePtr, X86_CMP, X86_EAX, 0, 4);
+        _patches[s->iftrue].append(_codePtr);
+        amd64_branch32(_codePtr, X86_CC_NZ, 0, 1);
+
+        if (_block->index + 1 != s->iffalse->index) {
+            _patches[s->iffalse].append(_codePtr);
+            amd64_jump32(_codePtr, 0);
+        }
+        return;
+    }
+    Q_ASSERT(!"TODO");
+}
+
+// Return statements are not implemented yet; the epilogue emitted in
+// visitFunction() is the only way generated code currently returns.
+void InstructionSelection::visitRet(IR::Ret *s)
+{
+    // Fix: `s` was unused, triggering -Wunused-parameter on every build.
+    Q_UNUSED(s);
+    qWarning() << "TODO: RET";
+    //Q_ASSERT(!"TODO");
+}
+
--- /dev/null
+#ifndef QV4ISEL_P_H
+#define QV4ISEL_P_H
+
+#include "qv4ir_p.h"
+#include "qmljs_objects.h"
+
+#include <QtCore/QHash>
+
+namespace QQmlJS {
+
+// Walks the IR statement-by-statement and emits amd64 machine code
+// directly into an executable buffer (see qv4isel.cpp). One instance is
+// reused across functions; visitFunction() resets the per-function state.
+class InstructionSelection: protected IR::StmtVisitor
+{
+public:
+    InstructionSelection(IR::Module *module);
+    ~InstructionSelection();
+
+    // Generate, patch and (currently) immediately execute `function`.
+    void visitFunction(IR::Function *function);
+
+protected:
+    // Intern an identifier as a GC-allocated runtime String.
+    String *identifier(const QString &s);
+
+    virtual void visitExp(IR::Exp *);
+    virtual void visitEnter(IR::Enter *);
+    virtual void visitLeave(IR::Leave *);
+    virtual void visitMove(IR::Move *);
+    virtual void visitJump(IR::Jump *);
+    virtual void visitCJump(IR::CJump *);
+    virtual void visitRet(IR::Ret *);
+
+private:
+    IR::Module *_module;
+    IR::Function *_function;   // function currently being generated
+    IR::BasicBlock *_block;    // block currently being generated
+    uchar *_code;              // start of the executable buffer
+    uchar *_codePtr;           // current emission cursor inside _code
+    QHash<IR::BasicBlock *, QVector<uchar *> > _patches; // branch sites awaiting fix-up, per target block
+    QHash<IR::BasicBlock *, uchar *> _addrs;             // emitted start address of each block
+    QHash<QString, String *> _identifiers;               // identifier interning cache
+};
+
+} // end of namespace QQmlJS
+
+#endif // QV4ISEL_P_H
--- /dev/null
+
+// Bring-up fixture for the IR -> amd64 backend: exercises global reads and
+// writes, a counted loop, comparison, and mixed-precedence arithmetic.
+// After any iteration where a == 1, d == a + b * c == 21.
+var a = 1
+var b = 2
+var c = 10
+var d = 100
+
+for (i = 0; i < 1000000; i = i + 1) {
+    if (a == 1)
+        d = a + b * c
+    else
+        d = 321
+}
--- /dev/null
+# Console test driver for the v4 JIT prototype (no app bundle on macOS).
+QT = core qmldevtools-private
+CONFIG -= app_bundle
+CONFIG += console
+
+# Select the plain (non-NaCl) code paths in the Mono codegen headers.
+DEFINES += __default_codegen__
+
+# Boehm garbage collector, used for runtime objects.
+LIBS += -lgc
+
+# Optional disassembler support (SHOW_CODE env var at runtime).
+udis86:LIBS += -ludis86
+else:DEFINES += NO_UDIS86
+
+SOURCES += main.cpp \
+    qv4codegen.cpp \
+    qv4ir.cpp \
+    qmljs_runtime.cpp \
+    qmljs_objects.cpp \
+    qv4isel.cpp
+
+HEADERS += \
+    qv4codegen_p.h \
+    qv4ir_p.h \
+    qmljs_runtime.h \
+    qmljs_objects.h \
+    qv4isel_p.h \
+    x86-codegen.h \
+    amd64-codegen.h
+
+
+
--- /dev/null
+/*
+ * x86-codegen.h: Macros for generating x86 code
+ *
+ * Authors:
+ * Paolo Molaro (lupus@ximian.com)
+ * Intel Corporation (ORP Project)
+ * Sergey Chaban (serge@wildwestsoftware.com)
+ * Dietmar Maurer (dietmar@ximian.com)
+ * Patrik Torstensson
+ *
+ * Copyright (C) 2000 Intel Corporation. All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef X86_H
+#define X86_H
+#include <assert.h>
+
+/* Native Client sandbox support: alignment bookkeeping around emitted
+ * instructions and call sequences. Under __default_codegen__ (this
+ * project's configuration) every macro collapses to a no-op. */
+#ifdef __native_client_codegen__
+extern gint8 nacl_align_byte;
+#endif /* __native_client_codegen__ */
+
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_codegen_pre(inst_ptr_ptr, inst_len) do { mono_nacl_align_inst(inst_ptr_ptr, inst_len); } while (0)
+#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
+#define x86_call_sequence_post_val(inst) \
+	(mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+#define x86_call_sequence_pre(inst) x86_call_sequence_pre_val((inst))
+#define x86_call_sequence_post(inst) x86_call_sequence_post_val((inst))
+#else
+#define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0)
+/* Two variants are needed to avoid warnings */
+#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
+#define x86_call_sequence_post_val(inst) _code_start
+#define x86_call_sequence_pre(inst)
+#define x86_call_sequence_post(inst)
+#endif /* __native_client_codegen__ */
+
+
+/*
+// x86 register numbers
+*/
+/* General-purpose register numbers: the 3-bit values encoded into the
+ * ModRM reg/rm and SIB fields. */
+typedef enum {
+	X86_EAX = 0,
+	X86_ECX = 1,
+	X86_EDX = 2,
+	X86_EBX = 3,
+	X86_ESP = 4,
+	X86_EBP = 5,
+	X86_ESI = 6,
+	X86_EDI = 7,
+	X86_NREG
+} X86_Reg_No;
+
+/* SSE register numbers, encoded the same way as the GP registers. */
+typedef enum {
+	X86_XMM0,
+	X86_XMM1,
+	X86_XMM2,
+	X86_XMM3,
+	X86_XMM4,
+	X86_XMM5,
+	X86_XMM6,
+	X86_XMM7,
+	X86_XMM_NREG
+} X86_XMM_Reg_No;
+
+/*
+// opcodes for alu instructions
+*/
+/* The value doubles as the /digit group field in the ModRM byte for the
+ * 0x80/0x81/0x83 immediate forms, and selects the opcode for reg forms. */
+typedef enum {
+	X86_ADD = 0,
+	X86_OR = 1,
+	X86_ADC = 2,
+	X86_SBB = 3,
+	X86_AND = 4,
+	X86_SUB = 5,
+	X86_XOR = 6,
+	X86_CMP = 7,
+	X86_NALU
+} X86_ALU_Opcode;
+/*
+// opcodes for shift instructions
+*/
+/* NOTE(review): X86_SHLD/X86_SHLR share numeric values with ROL/ROR; they
+ * appear to be dispatched via dedicated 0x0f opcodes rather than this
+ * group field — confirm against the shift-emitting macros. */
+typedef enum {
+	X86_SHLD,
+	X86_SHLR,
+	X86_ROL = 0,
+	X86_ROR = 1,
+	X86_RCL = 2,
+	X86_RCR = 3,
+	X86_SHL = 4,
+	X86_SHR = 5,
+	X86_SAR = 7,
+	X86_NSHIFT = 8
+} X86_Shift_Opcode;
+/*
+// opcodes for floating-point instructions
+*/
+typedef enum {
+	X86_FADD = 0,
+	X86_FMUL = 1,
+	X86_FCOM = 2,
+	X86_FCOMP = 3,
+	X86_FSUB = 4,
+	X86_FSUBR = 5,
+	X86_FDIV = 6,
+	X86_FDIVR = 7,
+	X86_NFP = 8
+} X86_FP_Opcode;
+/*
+// integer conditions codes
+*/
+/* Architecture-neutral condition codes; mapped to actual Jcc opcode bytes
+ * through x86_cc_unsigned_map / x86_cc_signed_map below. */
+typedef enum {
+	X86_CC_EQ = 0, X86_CC_E = 0, X86_CC_Z = 0,
+	X86_CC_NE = 1, X86_CC_NZ = 1,
+	X86_CC_LT = 2, X86_CC_B = 2, X86_CC_C = 2, X86_CC_NAE = 2,
+	X86_CC_LE = 3, X86_CC_BE = 3, X86_CC_NA = 3,
+	X86_CC_GT = 4, X86_CC_A = 4, X86_CC_NBE = 4,
+	X86_CC_GE = 5, X86_CC_AE = 5, X86_CC_NB = 5, X86_CC_NC = 5,
+	X86_CC_LZ = 6, X86_CC_S = 6,
+	X86_CC_GEZ = 7, X86_CC_NS = 7,
+	X86_CC_P = 8, X86_CC_PE = 8,
+	X86_CC_NP = 9, X86_CC_PO = 9,
+	X86_CC_O = 10,
+	X86_CC_NO = 11,
+	X86_NCC
+} X86_CC;
+
+/* FP status */
+/* Condition-code bit masks of the x87 status word (as read by fnstsw). */
+enum {
+	X86_FP_C0 = 0x100,
+	X86_FP_C1 = 0x200,
+	X86_FP_C2 = 0x400,
+	X86_FP_C3 = 0x4000,
+	X86_FP_CC_MASK = 0x4500
+};
+
+/* FP control word */
+/* Exception-mask, precision and rounding fields of the x87 control word. */
+enum {
+	X86_FPCW_INVOPEX_MASK = 0x1,
+	X86_FPCW_DENOPEX_MASK = 0x2,
+	X86_FPCW_ZERODIV_MASK = 0x4,
+	X86_FPCW_OVFEX_MASK = 0x8,
+	X86_FPCW_UNDFEX_MASK = 0x10,
+	X86_FPCW_PRECEX_MASK = 0x20,
+	X86_FPCW_PRECC_MASK = 0x300,
+	X86_FPCW_ROUNDC_MASK = 0xc00,
+
+	/* values for precision control */
+	X86_FPCW_PREC_SINGLE = 0,
+	X86_FPCW_PREC_DOUBLE = 0x200,
+	X86_FPCW_PREC_EXTENDED = 0x300,
+
+	/* values for rounding control */
+	X86_FPCW_ROUND_NEAREST = 0,
+	X86_FPCW_ROUND_DOWN = 0x400,
+	X86_FPCW_ROUND_UP = 0x800,
+	X86_FPCW_ROUND_TOZERO = 0xc00
+};
+
+/*
+// prefix code
+*/
+/* Instruction prefix bytes (lock, rep, segment overrides, operand/address
+ * size). UNLIKELY/LIKELY reuse the CS/DS segment bytes as branch hints. */
+typedef enum {
+	X86_LOCK_PREFIX = 0xF0,
+	X86_REPNZ_PREFIX = 0xF2,
+	X86_REPZ_PREFIX = 0xF3,
+	X86_REP_PREFIX = 0xF3,
+	X86_CS_PREFIX = 0x2E,
+	X86_SS_PREFIX = 0x36,
+	X86_DS_PREFIX = 0x3E,
+	X86_ES_PREFIX = 0x26,
+	X86_FS_PREFIX = 0x64,
+	X86_GS_PREFIX = 0x65,
+	X86_UNLIKELY_PREFIX = 0x2E,
+	X86_LIKELY_PREFIX = 0x3E,
+	X86_OPERAND_PREFIX = 0x66,
+	X86_ADDRESS_PREFIX = 0x67
+} X86_Prefix;
+
+/* Short-form (rel8) Jcc opcode bytes indexed by X86_CC, treating the
+ * comparison as unsigned. Add 0x10 with a 0x0f prefix for the rel32 form. */
+static const unsigned char
+x86_cc_unsigned_map [X86_NCC] = {
+	0x74, /* eq */
+	0x75, /* ne */
+	0x72, /* lt */
+	0x76, /* le */
+	0x77, /* gt */
+	0x73, /* ge */
+	0x78, /* lz */
+	0x79, /* gez */
+	0x7a, /* p */
+	0x7b, /* np */
+	0x70, /* o */
+	0x71, /* no */
+};
+
+/* Same table for signed comparisons. */
+static const unsigned char
+x86_cc_signed_map [X86_NCC] = {
+	0x74, /* eq */
+	0x75, /* ne */
+	0x7c, /* lt */
+	0x7e, /* le */
+	0x7f, /* gt */
+	0x7d, /* ge */
+	0x78, /* lz */
+	0x79, /* gez */
+	0x7a, /* p */
+	0x7b, /* np */
+	0x70, /* o */
+	0x71, /* no */
+};
+
+/* Scratch union for emitting a 32-bit immediate byte by byte. */
+typedef union {
+	int val;
+	unsigned char b [4];
+} x86_imm_buf;
+
+/* Sentinel base register for memindex addressing (disp32 only, no base). */
+#define X86_NOBASEREG (-1)
+
+/*
+// bitvector mask for callee-saved registers
+*/
+#define X86_ESI_MASK (1<<X86_ESI)
+#define X86_EDI_MASK (1<<X86_EDI)
+#define X86_EBX_MASK (1<<X86_EBX)
+#define X86_EBP_MASK (1<<X86_EBP)
+
+/* NOTE(review): the CALLEE/CALLER names look swapped relative to the
+ * usual terminology (eax/ecx/edx are caller-saved in the cdecl ABI);
+ * this is historical Mono naming — kept as-is. */
+#define X86_CALLEE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX))
+#define X86_CALLER_REGS ((1<<X86_EBX) | (1<<X86_EBP) | (1<<X86_ESI) | (1<<X86_EDI))
+#define X86_BYTE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX) | (1<<X86_EBX))
+
+#define X86_IS_SCRATCH(reg) (X86_CALLER_REGS & (1 << (reg))) /* X86_EAX, X86_ECX, or X86_EDX */
+#define X86_IS_CALLEE(reg) (X86_CALLEE_REGS & (1 << (reg))) /* X86_ESI, X86_EDI, X86_EBX, or X86_EBP */
+
+/* Only al/cl/dl/bl have 8-bit forms in 32-bit mode. */
+#define X86_IS_BYTE_REG(reg) ((reg) < 4)
+
+/*
+// Frame structure:
+//
+// +--------------------------------+
+// | in_arg[0] = var[0] |
+// | in_arg[1] = var[1] |
+// | . . . |
+// | in_arg[n_arg-1] = var[n_arg-1] |
+// +--------------------------------+
+// | return IP |
+// +--------------------------------+
+// | saved EBP | <-- frame pointer (EBP)
+// +--------------------------------+
+// | ... | n_extra
+// +--------------------------------+
+// | var[n_arg] |
+// | var[n_arg+1] | local variables area
+// | . . . |
+// | var[n_var-1] |
+// +--------------------------------+
+// | |
+// | |
+// | spill area | area for spilling mimic stack
+// | |
+// +--------------------------------|
+// | ebx |
+// | ebp [ESP_Frame only] |
+// | esi | 0..3 callee-saved regs
+// | edi | <-- stack pointer (ESP)
+// +--------------------------------+
+// | stk0 |
+// | stk1 | operand stack area/
+// | . . . | out args
+// | stkn-1 |
+// +--------------------------------|
+//
+//
+*/
+
+
+/*
+ * useful building blocks
+ */
+/* Field extractors for a ModRM byte: mod (bits 7-6), reg (5-3), rm (2-0). */
+#define x86_modrm_mod(modrm) ((modrm) >> 6)
+#define x86_modrm_reg(modrm) (((modrm) >> 3) & 0x7)
+#define x86_modrm_rm(modrm) ((modrm) & 0x7)
+
+/* Emit one ModRM/SIB-shaped byte built from 2/3/3-bit fields. */
+#define x86_address_byte(inst,m,o,r) do { *(inst)++ = ((((m)&0x03)<<6)|(((o)&0x07)<<3)|(((r)&0x07))); } while (0)
+/* Emit a 32-bit little-endian immediate, byte by byte. */
+#define x86_imm_emit32(inst,imm) \
+	do { \
+			x86_imm_buf imb; imb.val = (int) (imm); \
+			*(inst)++ = imb.b [0]; \
+			*(inst)++ = imb.b [1]; \
+			*(inst)++ = imb.b [2]; \
+			*(inst)++ = imb.b [3]; \
+	} while (0)
+#define x86_imm_emit16(inst,imm) do { *(short*)(inst) = (imm); (inst) += 2; } while (0)
+#define x86_imm_emit8(inst,imm) do { *(inst) = (unsigned char)((imm) & 0xff); ++(inst); } while (0)
+#define x86_is_imm8(imm) (((int)(imm) >= -128 && (int)(imm) <= 127))
+#define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1)))
+
+/* Register-direct (mod=3), register-indirect (mod=0) and absolute
+ * disp32 (mod=0, rm=5) operand encodings. */
+#define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0)
+#define x86_reg8_emit(inst,r,regno,is_rh,is_rnoh) do {x86_address_byte ((inst), 3, (is_rh)?((r)|4):(r), (is_rnoh)?((regno)|4):(regno));} while (0)
+#define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0)
+#define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0)
+
+/* Worst-case byte count of a membase operand (SIB + disp32). */
+#define kMaxMembaseEmitPadding 6
+
+/* Encode a [basereg + disp] operand. ESP as base always needs a SIB byte;
+ * EBP with zero displacement must use the disp8 form because mod=0/rm=5
+ * means absolute disp32. Picks disp8 vs disp32 by immediate size. */
+#define x86_membase_emit_body(inst,r,basereg,disp) do {\
+	if ((basereg) == X86_ESP) { \
+		if ((disp) == 0) { \
+			x86_address_byte ((inst), 0, (r), X86_ESP); \
+			x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+		} else if (x86_is_imm8((disp))) { \
+			x86_address_byte ((inst), 1, (r), X86_ESP); \
+			x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+			x86_imm_emit8 ((inst), (disp)); \
+		} else { \
+			x86_address_byte ((inst), 2, (r), X86_ESP); \
+			x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+			x86_imm_emit32 ((inst), (disp)); \
+		} \
+		break; \
+	} \
+	if ((disp) == 0 && (basereg) != X86_EBP) { \
+		x86_address_byte ((inst), 0, (r), (basereg)); \
+		break; \
+	} \
+	if (x86_is_imm8((disp))) { \
+		x86_address_byte ((inst), 1, (r), (basereg)); \
+		x86_imm_emit8 ((inst), (disp)); \
+	} else { \
+		x86_address_byte ((inst), 2, (r), (basereg)); \
+		x86_imm_emit32 ((inst), (disp)); \
+	} \
+	} while (0)
+
+#if defined(__native_client_codegen__) && defined(TARGET_AMD64)
+#define x86_membase_emit(inst,r,basereg,disp) \
+	do { \
+		amd64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \
+	} while (0)
+#else /* __default_codegen__ || 32-bit NaCl codegen */
+#define x86_membase_emit(inst,r,basereg,disp) \
+	do { \
+		x86_membase_emit_body((inst),(r),(basereg),(disp)); \
+	} while (0)
+#endif
+
+/* Worst-case byte count of a memindex operand (SIB + disp32). */
+#define kMaxMemindexEmitPadding 6
+
+/* Encode a [basereg + indexreg<<shift + disp] operand via a SIB byte.
+ * With X86_NOBASEREG a disp32 is mandatory (mod=0, base=5). */
+#define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \
+	do { \
+		if ((basereg) == X86_NOBASEREG) { \
+			x86_address_byte ((inst), 0, (r), 4); \
+			x86_address_byte ((inst), (shift), (indexreg), 5); \
+			x86_imm_emit32 ((inst), (disp)); \
+		} else if ((disp) == 0 && (basereg) != X86_EBP) { \
+			x86_address_byte ((inst), 0, (r), 4); \
+			x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+		} else if (x86_is_imm8((disp))) { \
+			x86_address_byte ((inst), 1, (r), 4); \
+			x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+			x86_imm_emit8 ((inst), (disp)); \
+		} else { \
+			x86_address_byte ((inst), 2, (r), 4); \
+			x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+			x86_imm_emit32 ((inst), (disp)); \
+		} \
+	} while (0)
+
+/*
+ * target is the position in the code where to jump to:
+ * target = code;
+ * .. output loop code...
+ * x86_mov_reg_imm (code, X86_EAX, 0);
+ * loop = code;
+ * x86_loop (code, -1);
+ * ... finish method
+ *
+ * patch displacement
+ * x86_patch (loop, target);
+ *
+ * ins should point at the start of the instruction that encodes a target.
+ * the instruction is inspected for validity and the correct displacement
+ * is inserted.
+ */
+/* Rewrite the displacement of the branch/call/loop instruction at `ins`
+ * so it transfers to `target`. Recognizes rel32 forms (0xe8/0xe9 and
+ * 0x0f-prefixed Jcc) and rel8 forms (loop, jump8, short Jcc); asserts on
+ * anything else or when a rel8 target is out of range. Displacements are
+ * relative to the end of the instruction, hence the -4 / -1 adjustments. */
+#define x86_do_patch(ins,target) \
+	do { \
+		unsigned char* pos = (ins) + 1; \
+		int disp, size = 0; \
+		switch (*(unsigned char*)(ins)) { \
+		case 0xe8: case 0xe9: ++size; break; /* call, jump32 */ \
+		case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x8f)) assert (0); \
+		   ++size; ++pos; break; /* prefix for 32-bit disp */ \
+		case 0xe0: case 0xe1: case 0xe2: /* loop */ \
+		case 0xeb: /* jump8 */ \
+		/* conditional jump opcodes */ \
+		case 0x70: case 0x71: case 0x72: case 0x73: \
+		case 0x74: case 0x75: case 0x76: case 0x77: \
+		case 0x78: case 0x79: case 0x7a: case 0x7b: \
+		case 0x7c: case 0x7d: case 0x7e: case 0x7f: \
+			break; \
+		default: assert (0); \
+		} \
+		disp = (target) - pos; \
+		if (size) x86_imm_emit32 (pos, disp - 4); \
+		else if (x86_is_imm8 (disp - 1)) x86_imm_emit8 (pos, disp - 1); \
+		else assert (0); \
+	} while (0)
+
+/* x86_patch: public patch entry point. Under NaCl it first skips the
+ * multi-byte nop padding inserted for bundle alignment (each recognized
+ * pattern below is one canonical nop encoding of length 1-7) and may
+ * translate the target address; otherwise it is x86_do_patch directly. */
+#if defined( __native_client_codegen__ ) && defined(TARGET_X86)
+
+#define x86_skip_nops(inst) \
+	do {    \
+		int in_nop = 0; \
+		do { \
+			in_nop = 0; \
+			if (inst[0] == 0x90) { \
+				in_nop = 1; \
+				inst += 1; \
+			} \
+			if (inst[0] == 0x8b && inst[1] == 0xc0) { \
+				in_nop = 1; \
+				inst += 2; \
+			} \
+			if (inst[0] == 0x8d && inst[1] == 0x6d \
+			 && inst[2] == 0x00) { \
+				in_nop = 1; \
+				inst += 3; \
+			} \
+			if (inst[0] == 0x8d && inst[1] == 0x64 \
+			 && inst[2] == 0x24 && inst[3] == 0x00) { \
+				in_nop = 1; \
+				inst += 4; \
+			} \
+			/* skip inst+=5 case because it's the 4-byte + 1-byte case */ \
+			if (inst[0] == 0x8d && inst[1] == 0xad \
+			 && inst[2] == 0x00 && inst[3] == 0x00 \
+			 && inst[4] == 0x00 && inst[5] == 0x00) { \
+				in_nop = 1; \
+				inst += 6; \
+			} \
+			if (inst[0] == 0x8d && inst[1] == 0xa4 \
+			 && inst[2] == 0x24 && inst[3] == 0x00 \
+			 && inst[4] == 0x00 && inst[5] == 0x00 \
+			 && inst[6] == 0x00 ) { \
+				in_nop = 1; \
+				inst += 7; \
+			} \
+		} while ( in_nop ); \
+	} while (0)
+
+#if defined(__native_client__)
+#define x86_patch(ins,target) \
+	do { \
+		unsigned char* inst = (ins); \
+		guint8* new_target = nacl_modify_patch_target((target)); \
+		x86_skip_nops((inst)); \
+		x86_do_patch((inst), new_target); \
+	} while (0)
+#else /* __native_client__ */
+#define x86_patch(ins,target) \
+	do { \
+		unsigned char* inst = (ins); \
+		guint8* new_target = (target); \
+		x86_skip_nops((inst)); \
+		x86_do_patch((inst), new_target); \
+	} while (0)
+#endif /* __native_client__ */
+
+#else
+#define x86_patch(ins,target) do { x86_do_patch((ins), (target)); } while (0)
+#endif /* __native_client_codegen__ */
+
+#ifdef __native_client_codegen__
+/* The breakpoint instruction is illegal in Native Client, although the HALT */
+/* instruction is allowed. The breakpoint is used several places in mini-x86.c */
+/* and exceptions-x86.c. */
+#define x86_breakpoint(inst) \
+	do { \
+		*(inst)++ = 0xf4; \
+	} while (0)
+#else
+/* int3 — software breakpoint. */
+#define x86_breakpoint(inst) \
+	do { \
+		*(inst)++ = 0xcc; \
+	} while (0)
+#endif
+
+/* Single-byte string/direction instructions (cld, stos, movs). The *d
+ * aliases are the "doubleword" names for the *l forms. */
+#define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0)
+#define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0)
+#define x86_stosl(inst) do { *(inst)++ =(unsigned char)0xab; } while (0)
+#define x86_stosd(inst) x86_stosl((inst))
+#define x86_movsb(inst) do { *(inst)++ =(unsigned char)0xa4; } while (0)
+#define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0)
+#define x86_movsd(inst) x86_movsl((inst))
+
+/* x86_prefix: emit a raw prefix byte. The NaCl variants add the sandbox
+ * bookkeeping described in their comments; the default is a plain store. */
+#if defined(__default_codegen__)
+#define x86_prefix(inst,p) \
+	do { \
+		*(inst)++ =(unsigned char) (p); \
+	} while (0)
+#elif defined(__native_client_codegen__)
+#if defined(TARGET_X86)
+/* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. */
+/* This keeps us from having to call x86_codegen_pre with specific */
+/* knowledge of the size of the instruction that follows it, and */
+/* localizes the alignment requirement to this spot. */
+#define x86_prefix(inst,p) \
+	do { \
+		x86_codegen_pre(&(inst), kNaClAlignment - 1); \
+		*(inst)++ =(unsigned char) (p); \
+	} while (0)
+#elif defined(TARGET_AMD64)
+/* We need to tag any prefixes so we can perform proper membase sandboxing */
+/* See: mini-amd64.c:amd64_nacl_membase_handler for verbose details */
+#define x86_prefix(inst,p) \
+	do { \
+		amd64_nacl_tag_legacy_prefix((inst)); \
+		*(inst)++ =(unsigned char) (p); \
+	} while (0)
+
+#endif /* TARGET_AMD64 */
+
+#endif /* __native_client_codegen__ */
+
+/* rdtsc: read the time-stamp counter into edx:eax. */
+#define x86_rdtsc(inst) \
+	do {	\
+		x86_codegen_pre(&(inst), 2); \
+		*(inst)++ = 0x0f;	\
+		*(inst)++ = 0x31;	\
+	} while (0)
+
+/* Atomic-capable read-modify-write instructions. Destination variants:
+ * _reg_reg (register), _mem_reg (absolute disp32), _membase_reg
+ * ([basereg+disp]). `size` == 1 selects the 8-bit opcode. Callers must
+ * emit a LOCK prefix themselves for atomicity. */
+#define x86_cmpxchg_reg_reg(inst,dreg,reg) \
+	do { \
+		x86_codegen_pre(&(inst), 3); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0xb1; \
+		x86_reg_emit ((inst), (reg), (dreg)); \
+	} while (0)
+
+#define x86_cmpxchg_mem_reg(inst,mem,reg) \
+	do { \
+		x86_codegen_pre(&(inst), 7); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0xb1; \
+		x86_mem_emit ((inst), (reg), (mem)); \
+	} while (0)
+
+#define x86_cmpxchg_membase_reg(inst,basereg,disp,reg) \
+	do { \
+		x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+		*(inst)++ = (unsigned char)0x0f; \
+		*(inst)++ = (unsigned char)0xb1; \
+		x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+	} while (0)
+
+#define x86_xchg_reg_reg(inst,dreg,reg,size) \
+	do { \
+		x86_codegen_pre(&(inst), 2); \
+		if ((size) == 1) \
+			*(inst)++ = (unsigned char)0x86; \
+		else \
+			*(inst)++ = (unsigned char)0x87; \
+		x86_reg_emit ((inst), (reg), (dreg)); \
+	} while (0)
+
+#define x86_xchg_mem_reg(inst,mem,reg,size) \
+	do { \
+		x86_codegen_pre(&(inst), 6); \
+		if ((size) == 1) \
+			*(inst)++ = (unsigned char)0x86; \
+		else \
+			*(inst)++ = (unsigned char)0x87; \
+		x86_mem_emit ((inst), (reg), (mem)); \
+	} while (0)
+
+#define x86_xchg_membase_reg(inst,basereg,disp,reg,size) \
+	do { \
+		x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+		if ((size) == 1) \
+			*(inst)++ = (unsigned char)0x86; \
+		else \
+			*(inst)++ = (unsigned char)0x87; \
+		x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+	} while (0)
+
+#define x86_xadd_reg_reg(inst,dreg,reg,size) \
+	do { \
+		x86_codegen_pre(&(inst), 3); \
+		*(inst)++ = (unsigned char)0x0F; \
+		if ((size) == 1) \
+			*(inst)++ = (unsigned char)0xC0; \
+		else \
+			*(inst)++ = (unsigned char)0xC1; \
+		x86_reg_emit ((inst), (reg), (dreg)); \
+	} while (0)
+
+#define x86_xadd_mem_reg(inst,mem,reg,size) \
+	do { \
+		x86_codegen_pre(&(inst), 7); \
+		*(inst)++ = (unsigned char)0x0F; \
+		if ((size) == 1) \
+			*(inst)++ = (unsigned char)0xC0; \
+		else \
+			*(inst)++ = (unsigned char)0xC1; \
+		x86_mem_emit ((inst), (reg), (mem)); \
+	} while (0)
+
+#define x86_xadd_membase_reg(inst,basereg,disp,reg,size) \
+	do { \
+		x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+		*(inst)++ = (unsigned char)0x0F; \
+		if ((size) == 1) \
+			*(inst)++ = (unsigned char)0xC0; \
+		else \
+			*(inst)++ = (unsigned char)0xC1; \
+		x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+	} while (0)
+
+/* Unary read-modify-write instructions. inc/dec use the 0xff group
+ * (/0 and /1); not/neg use the 0xf7 group (/2 and /3); the reg forms of
+ * inc/dec use the single-byte 0x40+r / 0x48+r encodings (32-bit only —
+ * those bytes are REX prefixes on amd64). */
+#define x86_inc_mem(inst,mem) \
+	do { \
+		x86_codegen_pre(&(inst), 6); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_mem_emit ((inst), 0, (mem)); \
+	} while (0)
+
+#define x86_inc_membase(inst,basereg,disp) \
+	do { \
+		x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_membase_emit ((inst), 0, (basereg), (disp)); \
+	} while (0)
+
+#define x86_inc_reg(inst,reg) do { *(inst)++ = (unsigned char)0x40 + (reg); } while (0)
+
+#define x86_dec_mem(inst,mem) \
+	do { \
+		x86_codegen_pre(&(inst), 6); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_mem_emit ((inst), 1, (mem)); \
+	} while (0)
+
+#define x86_dec_membase(inst,basereg,disp) \
+	do { \
+		x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+		*(inst)++ = (unsigned char)0xff; \
+		x86_membase_emit ((inst), 1, (basereg), (disp)); \
+	} while (0)
+
+#define x86_dec_reg(inst,reg) do { *(inst)++ = (unsigned char)0x48 + (reg); } while (0)
+
+#define x86_not_mem(inst,mem) \
+	do { \
+		x86_codegen_pre(&(inst), 6); \
+		*(inst)++ = (unsigned char)0xf7; \
+		x86_mem_emit ((inst), 2, (mem)); \
+	} while (0)
+
+#define x86_not_membase(inst,basereg,disp) \
+	do { \
+		x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+		*(inst)++ = (unsigned char)0xf7; \
+		x86_membase_emit ((inst), 2, (basereg), (disp)); \
+	} while (0)
+
+#define x86_not_reg(inst,reg) \
+	do { \
+		x86_codegen_pre(&(inst), 2); \
+		*(inst)++ = (unsigned char)0xf7; \
+		x86_reg_emit ((inst), 2, (reg)); \
+	} while (0)
+
+#define x86_neg_mem(inst,mem) \
+	do { \
+		x86_codegen_pre(&(inst), 6); \
+		*(inst)++ = (unsigned char)0xf7; \
+		x86_mem_emit ((inst), 3, (mem)); \
+	} while (0)
+
+#define x86_neg_membase(inst,basereg,disp) \
+	do { \
+		x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+		*(inst)++ = (unsigned char)0xf7; \
+		x86_membase_emit ((inst), 3, (basereg), (disp)); \
+	} while (0)
+
+#define x86_neg_reg(inst,reg) \
+	do { \
+		x86_codegen_pre(&(inst), 2); \
+		*(inst)++ = (unsigned char)0xf7; \
+		x86_reg_emit ((inst), 3, (reg)); \
+	} while (0)
+
+#define x86_nop(inst) do { *(inst)++ = (unsigned char)0x90; } while (0)
+
+/*
+ * ALU op with an immediate. `opc` is the X86_ADD..X86_CMP group index;
+ * it selects both the opcode byte ((opc<<3)+5 for the short EAX,imm32
+ * form — note the `break` leaves the do/while early) and the ModRM reg
+ * field of the 0x83 (sign-extended imm8) / 0x81 (imm32) encodings.
+ */
+#define x86_alu_reg_imm(inst,opc,reg,imm) \
+ do { \
+ if ((reg) == X86_EAX) { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
+ x86_imm_emit32 ((inst), (imm)); \
+ break; \
+ } \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_alu_mem_imm(inst,opc,mem,imm) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_alu_membase_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/* 8-bit ALU op on a membase operand: opcode 0x80 /opc, imm8. */
+#define x86_alu_membase8_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x80; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+/*
+ * ALU op, register/memory operands. The opcode byte is built as
+ * (opc<<3)+1 for the r/m,reg direction and (opc<<3)+3 for reg,r/m.
+ */
+#define x86_alu_mem_reg(inst,opc,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_alu_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_alu_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+/**
+ * @x86_alu_reg8_reg8:
+ * Supports ALU operations between two 8-bit registers.
+ * dreg := dreg opc reg
+ * X86_Reg_No enum is used to specify the registers.
+ * Additionally is_*_h flags are used to specify what part
+ * of a given 32-bit register is used - high (TRUE) or low (FALSE).
+ * For example: dreg = X86_EAX, is_dreg_h = TRUE -> use AH
+ */
+#define x86_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 2; \
+ x86_reg8_emit ((inst), (dreg), (reg), (is_dreg_h), (is_reg_h)); \
+ } while (0)
+
+#define x86_alu_reg_mem(inst,opc,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_alu_reg_membase(inst,opc,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * TEST — ANDs operands and sets flags without storing the result.
+ * Immediate forms: short 0xa9 EAX,imm32 encoding, otherwise 0xf7 /0.
+ * Register/memory forms use opcode 0x85 (TEST is symmetric, so the
+ * ModRM operand order in x86_test_reg_reg does not affect semantics).
+ */
+#define x86_test_reg_imm(inst,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((reg) == X86_EAX) { \
+ *(inst)++ = (unsigned char)0xa9; \
+ } else { \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 0, (reg)); \
+ } \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_mem_imm(inst,mem,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_membase_imm(inst,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_test_mem_reg(inst,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_test_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * Shift/rotate group. `opc` selects the operation via the ModRM reg
+ * field. Immediate forms: 0xd1 (shift by 1) or 0xc1 /opc, imm8.
+ * The non-immediate forms (0xd3) shift by CL.
+ */
+#define x86_shift_reg_imm(inst,opc,reg,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ } else { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_shift_mem_imm(inst,opc,mem,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } else { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } else { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/* Shift by CL: opcode 0xd3 /opc. */
+#define x86_shift_reg(inst,opc,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ } while (0)
+
+#define x86_shift_mem(inst,opc,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } while (0)
+
+#define x86_shift_membase(inst,opc,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * Multi op shift missing.
+ */
+
+/* Double-precision shifts: SHRD (0x0f 0xad/0xac) and SHLD (0x0f 0xa5/0xa4),
+ * shifting by CL or by an 8-bit immediate. */
+#define x86_shrd_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xad; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_shrd_reg_imm(inst,dreg,reg,shamt) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xac; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ x86_imm_emit8 ((inst), (shamt)); \
+ } while (0)
+
+#define x86_shld_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xa5; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_shld_reg_imm(inst,dreg,reg,shamt) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xa4; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ x86_imm_emit8 ((inst), (shamt)); \
+ } while (0)
+
+/*
+ * EDX:EAX = EAX * rm
+ * Opcode 0xf7 with ModRM reg field /4 (MUL) or /5 (IMUL).
+ */
+#define x86_mul_reg(inst,reg,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 4 + ((is_signed) ? 1 : 0), (reg)); \
+ } while (0)
+
+#define x86_mul_mem(inst,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 4 + ((is_signed) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_mul_membase(inst,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 4 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * r *= rm
+ * Two-operand IMUL: 0x0f 0xaf /r.
+ */
+#define x86_imul_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_imul_reg_mem(inst,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_imul_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * dreg = rm * imm
+ * Three-operand IMUL: 0x6b /r imm8 (sign-extended) or 0x69 /r imm32.
+ */
+#define x86_imul_reg_reg_imm(inst,dreg,reg,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/*
+ * reg = [mem] * imm — three-operand IMUL, 0x6b /r imm8 or 0x69 /r imm32.
+ * Fix: the imm32 branch previously called x86_reg_emit on a *memory*
+ * operand (copy-paste from the reg variant) and reserved only 6 bytes;
+ * it must use x86_mem_emit and reserve 1 (opcode) + 5 (ModRM+disp32)
+ * + 4 (imm32) = 10 bytes, mirroring the imm8 branch.
+ */
+#define x86_imul_reg_mem_imm(inst,reg,mem,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/* Three-operand IMUL with a membase source: 0x6b/0x69 /r + imm8/imm32. */
+#define x86_imul_reg_membase_imm(inst,reg,basereg,disp,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/*
+ * divide EDX:EAX by rm;
+ * eax = quotient, edx = remainder
+ * Opcode 0xf7 with ModRM reg field /6 (DIV) or /7 (IDIV).
+ */
+
+#define x86_div_reg(inst,reg,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 6 + ((is_signed) ? 1 : 0), (reg)); \
+ } while (0)
+
+#define x86_div_mem(inst,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 6 + ((is_signed) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_div_membase(inst,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 6 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * MOV family. `size` is 1, 2, or 4 bytes; the 2-byte form emits the
+ * 0x66 operand-size prefix and falls through to the 32-bit opcode.
+ * Store direction uses 0x88/0x89, load direction 0x8a/0x8b.
+ */
+#define x86_mov_mem_reg(inst,mem,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_mov_regp_reg(inst,regp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_regp_emit ((inst), (reg), (regp)); \
+ } while (0)
+
+#define x86_mov_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_mov_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+/* Worst-case byte count for x86_mov_reg_membase: prefix + opcode + membase. */
+#define kMovRegMembasePadding (2 + kMaxMembaseEmitPadding)
+
+#define x86_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x86_codegen_pre(&(inst), kMovRegMembasePadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+/*
+ * Note: x86_clear_reg () changes the condition code!
+ */
+#define x86_clear_reg(inst,reg) x86_alu_reg_reg((inst), X86_XOR, (reg), (reg))
+
+/* MOV reg, imm32: one-byte 0xb8+reg encoding followed by the immediate. */
+#define x86_mov_reg_imm(inst,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0xb8 + (reg); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+/* MOV r/m, imm — 0xc6 /0 imm8 for size 1, 0xc7 /0 (with the 0x66
+ * prefix for size 2) otherwise. */
+#define x86_mov_mem_imm(inst,mem,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 9); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_mov_membase_imm(inst,basereg,disp,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 4 + kMaxMemindexEmitPadding); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+/*
+ * LEA reg, [mem] — opcode 0x8d.
+ * Fix: reserve 6 bytes (1 opcode + up to 5 for ModRM + disp32),
+ * matching every other opcode+x86_mem_emit macro in this file;
+ * the previous value of 5 under-reserved by one byte.
+ */
+#define x86_lea_mem(inst,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+/* LEA reg, [basereg+disp] and LEA reg, [basereg+indexreg<<shift+disp]. */
+#define x86_lea_membase(inst,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_lea_memindex(inst,reg,basereg,disp,indexreg,shift) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+/*
+ * Widening moves (MOVZX/MOVSX): base opcode 0x0f 0xb6 (MOVZX r32, r/m8);
+ * +0x08 selects the sign-extending variant, +0x01 the 16-bit source.
+ */
+#define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ g_assert (is_half || X86_IS_BYTE_REG (reg)); \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_widen_mem(inst,dreg,mem,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_mem_emit ((inst), (dreg), (mem)); \
+ } while (0)
+
+#define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_memindex_emit ((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0)
+#define x86_wait(inst) do { *(inst)++ = (unsigned char)0x9b; } while (0)
+
+/* x87 arithmetic with a memory operand: 0xdc (double) / 0xd8 (float),
+ * operation selected by the ModRM reg field via `opc`. */
+#define x86_fp_op_mem(inst,opc,mem,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } while (0)
+
+#define x86_fp_op_membase(inst,opc,basereg,disp,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } while (0)
+
+/* ST(0) = ST(0) op ST(index): 0xd8, second byte encodes op and index. */
+#define x86_fp_op(inst,opc,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd8; \
+ *(inst)++ = (unsigned char)0xc0+((opc)<<3)+((index)&0x07); \
+ } while (0)
+
+/* ST(index) = ST(index) op ST(0) (0xdc), optionally popping (0xde).
+ * `map` swaps the sub/div opcode pairs, whose operand order reverses
+ * in this direction. */
+#define x86_fp_op_reg(inst,opc,index,pop_stack) \
+ do { \
+ static const unsigned char map[] = { 0, 1, 2, 3, 5, 4, 7, 6, 8}; \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (pop_stack) ? (unsigned char)0xde : (unsigned char)0xdc; \
+ *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \
+ } while (0)
+
+/**
+ * @x86_fp_int_op_membase
+ * Supports FPU operations between ST(0) and integer operand in memory.
+ * Operation encoded using X86_FP_Opcode enum.
+ * Operand is addressed by [basereg + disp].
+ * is_int specifies whether operand is int32 (TRUE) or int16 (FALSE).
+ */
+#define x86_fp_int_op_membase(inst,opc,basereg,disp,is_int) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_int) ? (unsigned char)0xda : (unsigned char)0xde; \
+ x86_membase_emit ((inst), opc, (basereg), (disp)); \
+ } while (0)
+
+/* FSTP ST(index): 0xdd 0xd8+index. */
+#define x86_fstp(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdd; \
+ *(inst)++ = (unsigned char)0xd8+(index); \
+ } while (0)
+
+#define x86_fcompp(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xde; \
+ *(inst)++ = (unsigned char)0xd9; \
+ } while (0)
+
+#define x86_fucompp(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xda; \
+ *(inst)++ = (unsigned char)0xe9; \
+ } while (0)
+
+/* FNSTSW AX: 0xdf 0xe0. */
+#define x86_fnstsw(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+/* Store (0xd9 /7) and load (0xd9 /5) of the FPU control word. */
+#define x86_fnstcw(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } while (0)
+
+#define x86_fnstcw_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fldcw(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } while (0)
+
+#define x86_fldcw_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } while (0)
+
+/* FCHS (0xd9 0xe0) and FPREM (0xd9 0xf8). */
+#define x86_fchs(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+#define x86_frem(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xf8; \
+ } while (0)
+
+#define x86_fxch(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \
+ } while (0)
+
+/* EFLAGS-setting compares: FCOMI/FCOMIP (0xdb/0xdf 0xf0+i) and
+ * FUCOMI/FUCOMIP (0xdb/0xdf 0xe8+i). */
+#define x86_fcomi(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdb; \
+ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fcomip(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fucomi(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdb; \
+ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fucomip(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \
+ } while (0)
+
+/* x87 loads: FLD m64/m32 (0xdd/0xd9 /0), FLD m80 (0xdb /5),
+ * FILD m64/m32 (0xdf /5 / 0xdb /0), and register/constant loads. */
+#define x86_fld(inst,mem,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_fld_membase(inst,basereg,disp,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fld80_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } while (0)
+
+#define x86_fld80_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fild(inst,mem,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } \
+ } while (0)
+
+#define x86_fild_membase(inst,basereg,disp,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } \
+ } while (0)
+
+/* FLD ST(index): 0xd9 0xc0+index. */
+#define x86_fld_reg(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xc0 + ((index) & 0x07); \
+ } while (0)
+
+/* Constant loads: FLDZ, FLD1, FLDPI. */
+#define x86_fldz(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xee; \
+ } while (0)
+
+#define x86_fld1(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xe8; \
+ } while (0)
+
+#define x86_fldpi(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xeb; \
+ } while (0)
+
+/* x87 stores: FST/FSTP m64/m32 (0xdd/0xd9, /2 or /3 when popping),
+ * FSTP m80 (0xdb /7), FISTP m64/m32 (0xdf /7, 0xdb /3). */
+#define x86_fst(inst,mem,is_double,pop_stack) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_fst_membase(inst,basereg,disp,is_double,pop_stack) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
+
+#define x86_fst80_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } while (0)
+
+
+#define x86_fst80_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } while (0)
+
+
+#define x86_fist_pop(inst,mem,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 3, (mem)); \
+ } \
+ } while (0)
+
+#define x86_fist_pop_membase(inst,basereg,disp,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 3, (basereg), (disp)); \
+ } \
+ } while (0)
+
+/* FSTSW AX: WAIT (0x9b) followed by FNSTSW AX (0xdf 0xe0). */
+#define x86_fstsw(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x9b; \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+/**
+ * @x86_fist_membase
+ * Converts content of ST(0) to integer and stores it at memory location
+ * addressed by [basereg + disp].
+ * is_int specifies whether destination is int32 (TRUE) or int16 (FALSE).
+ */
+#define x86_fist_membase(inst,basereg,disp,is_int) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_int)) { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } \
+ } while (0)
+
+
+/* PUSH: one-byte 0x50+reg for registers; 0xff /6 for memory operands;
+ * 0x6a imm8 / 0x68 imm32 for immediates. */
+#define x86_push_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x50 + (reg); \
+ } while (0)
+
+#define x86_push_regp(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_regp_emit ((inst), 6, (reg)); \
+ } while (0)
+
+#define x86_push_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 6, (mem)); \
+ } while (0)
+
+#define x86_push_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 6, (basereg), (disp)); \
+ } while (0)
+
+#define x86_push_memindex(inst,basereg,disp,indexreg,shift) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+/* Emits a PUSH with a placeholder imm32 to be patched later. */
+#define x86_push_imm_template(inst) x86_push_imm (inst, 0xf0f0f0f0)
+
+#define x86_push_imm(inst,imm) \
+ do { \
+ int _imm = (int) (imm); \
+ if (x86_is_imm8 (_imm)) { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0x6A; \
+ x86_imm_emit8 ((inst), (_imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x68; \
+ x86_imm_emit32 ((inst), (_imm)); \
+ } \
+ } while (0)
+
+/* POP reg: one-byte 0x58+reg encoding. */
+#define x86_pop_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x58 + (reg); \
+ } while (0)
+
+/*
+ * POP dword [mem].
+ * Fix: POP r/m32 is opcode 0x8f with ModRM reg field /0; the previous
+ * 0x87 byte is XCHG r/m32,r32 and would emit the wrong instruction.
+ */
+#define x86_pop_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x8f; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+/*
+ * POP dword [basereg+disp].
+ * Fix: same defect as x86_pop_mem — POP r/m32 is 0x8f /0, not 0x87
+ * (which encodes XCHG r/m32,r32).
+ */
+#define x86_pop_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x8f; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+/* Whole-register-file and flags push/pop (32-bit only). */
+#define x86_pushad(inst) do { *(inst)++ = (unsigned char)0x60; } while (0)
+#define x86_pushfd(inst) do { *(inst)++ = (unsigned char)0x9c; } while (0)
+#define x86_popad(inst) do { *(inst)++ = (unsigned char)0x61; } while (0)
+#define x86_popfd(inst) do { *(inst)++ = (unsigned char)0x9d; } while (0)
+
+/* LOOP/LOOPE/LOOPNE with an 8-bit relative displacement. */
+#define x86_loop(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe2; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_loope(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe1; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_loopne(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe0; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+/* Unconditional relative jumps: 0xe9 rel32 and 0xeb rel8. On amd64 the
+ * bodies are wrapped in the amd64 codegen pre/post hooks instead. */
+#if defined(TARGET_X86)
+#define x86_jump32(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0xe9; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_jump8(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xeb; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other */
+/* amd64 specific files, so they need to be instrumented directly. */
+#define x86_jump32(inst,imm) \
+ do { \
+ amd64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0xe9; \
+ x86_imm_emit32 ((inst), (imm)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+
+#define x86_jump8(inst,imm) \
+ do { \
+ amd64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0xeb; \
+ x86_imm_emit8 ((inst), (imm)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+#endif
+
+/* Indirect jumps. Under Native Client the target must first be masked
+ * (AND with nacl_align_byte) and memory targets are loaded into ECX;
+ * otherwise the plain 0xff /4 forms are emitted. */
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_jump_reg(inst,reg) do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and */ \
+ x86_reg_emit ((inst), 4, (reg)); /* reg */ \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 4, (reg)); \
+ } while (0)
+
+/* Let's hope ECX is available for these... */
+#define x86_jump_mem(inst,mem) do { \
+ x86_mov_reg_mem(inst, (X86_ECX), (mem), 4); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+
+#define x86_jump_membase(inst,basereg,disp) do { \
+ x86_mov_reg_membase(inst, (X86_ECX), basereg, disp, 4); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+
+/* like x86_jump_membase, but force a 32-bit displacement */
+#define x86_jump_membase32(inst,basereg,disp) do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x8b; \
+ x86_address_byte ((inst), 2, X86_ECX, (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+#else /* __native_client_codegen__ */
+#define x86_jump_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 4, (reg)); \
+ } while (0)
+
+#define x86_jump_mem(inst,mem) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 4, (mem)); \
+ } while (0)
+
+#define x86_jump_membase(inst,basereg,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 4, (basereg), (disp)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+/*
+ * target is a pointer in our buffer.
+ * Emits a jmp rel8 when the displacement fits in a signed byte,
+ * otherwise a jmp rel32; the displacement is recomputed for each
+ * form since the instruction lengths differ (2 vs 5 bytes).
+ */
+#define x86_jump_code_body(inst,target) \
+ do { \
+ int t; \
+ x86_codegen_pre(&(inst), 2); \
+ t = (unsigned char*)(target) - (inst) - 2; \
+ if (x86_is_imm8(t)) { \
+ x86_jump8 ((inst), t); \
+ } else { \
+ x86_codegen_pre(&(inst), 5); \
+ t = (unsigned char*)(target) - (inst) - 5; \
+ x86_jump32 ((inst), t); \
+ } \
+ } while (0)
+
+#if defined(__default_codegen__)
+#define x86_jump_code(inst,target) \
+ do { \
+ x86_jump_code_body((inst),(target)); \
+ } while (0)
+#elif defined(__native_client_codegen__) && defined(TARGET_X86)
+/* Emit, then re-patch in case NaCl bundling moved the instruction. */
+#define x86_jump_code(inst,target) \
+ do { \
+ guint8* jump_start = (inst); \
+ x86_jump_code_body((inst),(target)); \
+ x86_patch(jump_start, (target)); \
+ } while (0)
+#elif defined(__native_client_codegen__) && defined(TARGET_AMD64)
+#define x86_jump_code(inst,target) \
+ do { \
+ /* jump_code_body is used twice because there are offsets */ \
+ /* calculated based on the IP, which can change after the */ \
+ /* call to amd64_codegen_post */ \
+ amd64_codegen_pre(inst); \
+ x86_jump_code_body((inst),(target)); \
+ inst = amd64_codegen_post(inst); \
+ x86_jump_code_body((inst),(target)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+/*
+ * Jump by 'disp' measured from the START of the jump instruction:
+ * subtract the instruction length (2 for rel8, 5 for rel32) to get
+ * the encoded end-relative displacement.
+ */
+#define x86_jump_disp(inst,disp) \
+ do { \
+ int t = (disp) - 2; \
+ if (x86_is_imm8(t)) { \
+ x86_jump8 ((inst), t); \
+ } else { \
+ t -= 3; \
+ x86_jump32 ((inst), t); \
+ } \
+ } while (0)
+
+#if defined(TARGET_X86)
+/* Conditional branch rel8: one-byte Jcc opcode (0x70+cc) taken from */
+/* the signed or unsigned condition-code map. */
+#define x86_branch8(inst,cond,imm,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)]; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)]; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+/* Conditional branch rel32: 0x0f 0x80+cc (map value + 0x10). */
+#define x86_branch32(inst,cond,imm,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other */
+/* amd64 specific files, so they need to be instrumented directly. */
+#define x86_branch8(inst,cond,imm,is_signed) \
+ do { \
+ amd64_codegen_pre(inst); \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)]; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)]; \
+ x86_imm_emit8 ((inst), (imm)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+#define x86_branch32(inst,cond,imm,is_signed) \
+ do { \
+ amd64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \
+ x86_imm_emit32 ((inst), (imm)); \
+ amd64_codegen_post(inst); \
+ } while (0)
+#endif
+
+#if defined(TARGET_X86)
+/* Conditional branch to 'target' (a pointer in our buffer): picks */
+/* the rel8 or rel32 form by displacement size, then re-patches. */
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ int offset; \
+ guint8* branch_start; \
+ x86_codegen_pre(&(inst), 2); \
+ offset = (target) - (inst) - 2; \
+ branch_start = (inst); \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ x86_codegen_pre(&(inst), 6); \
+ offset = (target) - (inst) - 6; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ x86_patch(branch_start, (target)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* This macro is used directly from mini-amd64.c and other */
+/* amd64 specific files, so it needs to be instrumented directly. */
+
+/* Displacement is recomputed per form: 2 bytes for Jcc rel8, */
+/* 6 bytes for 0x0f Jcc rel32. */
+#define x86_branch_body(inst,cond,target,is_signed) \
+ do { \
+ int offset = (target) - (inst) - 2; \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ offset = (target) - (inst) - 6; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ } while (0)
+
+#if defined(__default_codegen__)
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ } while (0)
+#elif defined(__native_client_codegen__)
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ /* branch_body is used twice because there are offsets */ \
+ /* calculated based on the IP, which can change after */ \
+ /* the call to amd64_codegen_post */ \
+ amd64_codegen_pre(inst); \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ inst = amd64_codegen_post(inst); \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#endif /* TARGET_AMD64 */
+
+/*
+ * Conditional branch by 'disp' measured from the start of the branch
+ * instruction; adjusts for the 2-byte rel8 vs 6-byte rel32 encoding.
+ */
+#define x86_branch_disp(inst,cond,disp,is_signed) \
+ do { \
+ int offset = (disp) - 2; \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ offset -= 4; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ } while (0)
+
+/* setcc r/m8: 0x0f 0x90+cc (map value + 0x20). The register form */
+/* requires a byte-addressable register (AL/BL/CL/DL on x86). */
+#define x86_set_reg(inst,cond,reg,is_signed) \
+ do { \
+ g_assert (X86_IS_BYTE_REG (reg)); \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_reg_emit ((inst), 0, (reg)); \
+ } while (0)
+
+#define x86_set_mem(inst,cond,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_set_membase(inst,cond,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+/* call rel32 (0xe8); 'disp' is relative to the end of the 5-byte */
+/* instruction. The _body form skips call-sequence instrumentation. */
+#define x86_call_imm_body(inst,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xe8; \
+ x86_imm_emit32 ((inst), (int)(disp)); \
+ } while (0)
+
+#define x86_call_imm(inst,disp) \
+ do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_imm_body((inst), (disp)); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+/* NaCl indirect call: mask the target register to the bundle */
+/* alignment, then 'call *reg'; kept as a fixed-size sequence so */
+/* the return address lands at a bundle boundary. */
+#define x86_call_reg_internal(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x83; /* and */ \
+ x86_reg_emit ((inst), 4, (reg)); /* reg */ \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* call */ \
+ x86_reg_emit ((inst), 2, (reg)); /* reg */ \
+ } while (0)
+
+#define x86_call_reg(inst, reg) do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_reg_internal(inst, reg); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+
+/* It appears that x86_call_mem() is never used, so I'm leaving it out. */
+#define x86_call_membase(inst,basereg,disp) do { \
+ x86_call_sequence_pre((inst)); \
+ /* x86_mov_reg_membase() inlined so its fixed size */ \
+ *(inst)++ = (unsigned char)0x8b; \
+ x86_address_byte ((inst), 2, (X86_ECX), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ x86_call_reg_internal(inst, X86_ECX); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+#else /* __native_client_codegen__ */
+/* Plain indirect calls: opcode 0xff with ModRM reg field /2. */
+#define x86_call_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 2, (reg)); \
+ } while (0)
+
+#define x86_call_mem(inst,mem) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 2, (mem)); \
+ } while (0)
+
+#define x86_call_membase(inst,basereg,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+
+/* Emit the call twice: bundle alignment may move the instruction, */
+/* which changes the IP-relative displacement; the second emission */
+/* (and the final patch) use the aligned address. */
+#define x86_call_code(inst,target) \
+ do { \
+ int _x86_offset; \
+ guint8* call_start; \
+ guint8* _aligned_start; \
+ x86_call_sequence_pre_val((inst)); \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
+ _aligned_start = x86_call_sequence_post_val((inst)); \
+ call_start = _aligned_start; \
+ _x86_offset = (unsigned char*)(target) - (_aligned_start); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((_aligned_start), _x86_offset); \
+ x86_patch(call_start, (target)); \
+ } while (0)
+
+/* NaCl sandboxed return: pop the return address into ecx, clear its */
+/* low bits with the bundle-alignment mask, and jump through it. */
+#define SIZE_OF_RET 6
+#define x86_ret(inst) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and $nacl_align_byte, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ } while (0)
+
+/* pop return address */
+/* pop imm bytes from stack */
+/* return */
+#define x86_ret_imm(inst,imm) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_alu_reg_imm ((inst), X86_ADD, X86_ESP, imm); \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and $nacl_align_byte, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+} while (0)
+#else /* __native_client_codegen__ */
+
+#define x86_call_code(inst,target) \
+ do { \
+ int _x86_offset; \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
+ } while (0)
+
+/* ret (0xc3) / ret imm16 (0xc2): the imm form also pops 'imm' */
+/* bytes of arguments; a zero imm degenerates to plain ret. */
+#define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+
+#define x86_ret_imm(inst,imm) \
+ do { \
+ if ((imm) == 0) { \
+ x86_ret ((inst)); \
+ } else { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0xc2; \
+ x86_imm_emit16 ((inst), (imm)); \
+ } \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+/* cmovcc: 0x0f 0x40+cc (condition map value - 0x30). */
+#define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_cmov_mem(inst,cond,is_signed,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_cmov_membase(inst,cond,is_signed,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/* enter framesize,0: 0xc8 imm16 imm8 (nesting level fixed at 0). */
+#define x86_enter(inst,framesize) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xc8; \
+ x86_imm_emit16 ((inst), (framesize)); \
+ *(inst)++ = 0; \
+ } while (0)
+
+#define x86_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+#define x86_sahf(inst) do { *(inst)++ = (unsigned char)0x9e; } while (0)
+
+/* x87 no-operand arithmetic ops: 0xd9 + second opcode byte. */
+#define x86_fsin(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0)
+#define x86_fcos(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0)
+#define x86_fabs(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0)
+#define x86_ftst(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0)
+#define x86_fxam(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0)
+#define x86_fpatan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0)
+#define x86_fprem(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0)
+#define x86_fprem1(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0)
+#define x86_frndint(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0)
+#define x86_fsqrt(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0)
+#define x86_fptan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0)
+
+/*
+ * Emit exactly 'size' (1..7) bytes of side-effect-free padding:
+ * nop, 'mov eax,eax', and lea-of-self forms of various lengths.
+ * Sizes outside 1..7 are a caller bug (assert).
+ */
+#define x86_padding(inst,size) \
+ do { \
+ switch ((size)) { \
+ case 1: x86_nop ((inst)); break; \
+ case 2: *(inst)++ = 0x8b; \
+ *(inst)++ = 0xc0; break; \
+ case 3: *(inst)++ = 0x8d; *(inst)++ = 0x6d; \
+ *(inst)++ = 0x00; break; \
+ case 4: *(inst)++ = 0x8d; *(inst)++ = 0x64; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ break; \
+ case 5: *(inst)++ = 0x8d; *(inst)++ = 0x64; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ x86_nop ((inst)); break; \
+ case 6: *(inst)++ = 0x8d; *(inst)++ = 0xad; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ break; \
+ case 7: *(inst)++ = 0x8d; *(inst)++ = 0xa4; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; break; \
+ default: assert (0); \
+ } \
+ } while (0)
+
+#ifdef __native_client_codegen__
+
+/* Fixed byte lengths of the NaCl call sequences above, used by */
+/* callers that must pad calls to end on a bundle boundary. */
+#define kx86NaClLengthOfCallReg 5
+#define kx86NaClLengthOfCallImm 5
+#define kx86NaClLengthOfCallMembase (kx86NaClLengthOfCallReg + 6)
+
+#endif /* __native_client_codegen__ */
+
+/* Standard prologue: enter with 'frame_size', then push every */
+/* register whose bit is set in 'reg_mask' (ascending order). */
+#define x86_prolog(inst,frame_size,reg_mask) \
+ do { \
+ unsigned i, m = 1; \
+ x86_enter ((inst), (frame_size)); \
+ for (i = 0; i < X86_NREG; ++i, m <<= 1) { \
+ if ((reg_mask) & m) \
+ x86_push_reg ((inst), i); \
+ } \
+ } while (0)
+
+/* Matching epilogue: pop the saved registers in reverse */
+/* (descending) order, then leave + ret. */
+#define x86_epilog(inst,reg_mask) \
+ do { \
+ unsigned i, m = 1 << X86_EDI; \
+ for (i = X86_EDI; m != 0; i--, m=m>>1) { \
+ if ((reg_mask) & m) \
+ x86_pop_reg ((inst), i); \
+ } \
+ x86_leave ((inst)); \
+ x86_ret ((inst)); \
+ } while (0)
+
+
+/*
+ * SSE/SSE2/SSE3/SSE4 opcode bytes (the byte following 0x0F, or
+ * following 0x0F 0x38 for the /*sse41*/-marked three-byte forms).
+ * The operand size / operation variant is selected by the 0x66,
+ * 0xF2 or 0xF3 prefix emitted by the x86_sse_alu_* macros below.
+ */
+typedef enum {
+ X86_SSE_SQRT = 0x51,
+ X86_SSE_RSQRT = 0x52,
+ X86_SSE_RCP = 0x53,
+ X86_SSE_ADD = 0x58,
+ X86_SSE_DIV = 0x5E,
+ X86_SSE_MUL = 0x59,
+ X86_SSE_SUB = 0x5C,
+ X86_SSE_MIN = 0x5D,
+ X86_SSE_MAX = 0x5F,
+ X86_SSE_COMP = 0xC2,
+ X86_SSE_AND = 0x54,
+ X86_SSE_ANDN = 0x55,
+ X86_SSE_OR = 0x56,
+ X86_SSE_XOR = 0x57,
+ X86_SSE_UNPCKL = 0x14,
+ X86_SSE_UNPCKH = 0x15,
+
+ X86_SSE_ADDSUB = 0xD0,
+ X86_SSE_HADD = 0x7C,
+ X86_SSE_HSUB = 0x7D,
+ X86_SSE_MOVSHDUP = 0x16,
+ X86_SSE_MOVSLDUP = 0x12,
+ X86_SSE_MOVDDUP = 0x12,
+
+ X86_SSE_PAND = 0xDB,
+ X86_SSE_POR = 0xEB,
+ X86_SSE_PXOR = 0xEF,
+
+ X86_SSE_PADDB = 0xFC,
+ X86_SSE_PADDW = 0xFD,
+ X86_SSE_PADDD = 0xFE,
+ X86_SSE_PADDQ = 0xD4,
+
+ X86_SSE_PSUBB = 0xF8,
+ X86_SSE_PSUBW = 0xF9,
+ X86_SSE_PSUBD = 0xFA,
+ X86_SSE_PSUBQ = 0xFB,
+
+ X86_SSE_PMAXSB = 0x3C, /*sse41*/
+ X86_SSE_PMAXSW = 0xEE,
+ X86_SSE_PMAXSD = 0x3D, /*sse41*/
+
+ X86_SSE_PMAXUB = 0xDE,
+ X86_SSE_PMAXUW = 0x3E, /*sse41*/
+ X86_SSE_PMAXUD = 0x3F, /*sse41*/
+
+ X86_SSE_PMINSB = 0x38, /*sse41*/
+ X86_SSE_PMINSW = 0xEA,
+ X86_SSE_PMINSD = 0x39,/*sse41*/
+
+ X86_SSE_PMINUB = 0xDA,
+ X86_SSE_PMINUW = 0x3A, /*sse41*/
+ X86_SSE_PMINUD = 0x3B, /*sse41*/
+
+ X86_SSE_PAVGB = 0xE0,
+ X86_SSE_PAVGW = 0xE3,
+
+ X86_SSE_PCMPEQB = 0x74,
+ X86_SSE_PCMPEQW = 0x75,
+ X86_SSE_PCMPEQD = 0x76,
+ X86_SSE_PCMPEQQ = 0x29, /*sse41*/
+
+ X86_SSE_PCMPGTB = 0x64,
+ X86_SSE_PCMPGTW = 0x65,
+ X86_SSE_PCMPGTD = 0x66,
+ X86_SSE_PCMPGTQ = 0x37, /*sse42*/
+
+ X86_SSE_PSADBW = 0xf6,
+
+ X86_SSE_PSHUFD = 0x70,
+
+ X86_SSE_PUNPCKLBW = 0x60,
+ X86_SSE_PUNPCKLWD = 0x61,
+ X86_SSE_PUNPCKLDQ = 0x62,
+ X86_SSE_PUNPCKLQDQ = 0x6C,
+
+ X86_SSE_PUNPCKHBW = 0x68,
+ X86_SSE_PUNPCKHWD = 0x69,
+ X86_SSE_PUNPCKHDQ = 0x6A,
+ X86_SSE_PUNPCKHQDQ = 0x6D,
+
+ X86_SSE_PACKSSWB = 0x63,
+ X86_SSE_PACKSSDW = 0x6B,
+
+ X86_SSE_PACKUSWB = 0x67,
+ X86_SSE_PACKUSDW = 0x2B,/*sse41*/
+
+ X86_SSE_PADDUSB = 0xDC,
+ X86_SSE_PADDUSW = 0xDD,
+ X86_SSE_PSUBUSB = 0xD8,
+ X86_SSE_PSUBUSW = 0xD9,
+
+ X86_SSE_PADDSB = 0xEC,
+ X86_SSE_PADDSW = 0xED,
+ X86_SSE_PSUBSB = 0xE8,
+ X86_SSE_PSUBSW = 0xE9,
+
+ X86_SSE_PMULLW = 0xD5,
+ X86_SSE_PMULLD = 0x40,/*sse41*/
+ X86_SSE_PMULHUW = 0xE4,
+ X86_SSE_PMULHW = 0xE5,
+ X86_SSE_PMULUDQ = 0xF4,
+
+ X86_SSE_PMOVMSKB = 0xD7,
+
+ X86_SSE_PSHIFTW = 0x71,
+ X86_SSE_PSHIFTD = 0x72,
+ X86_SSE_PSHIFTQ = 0x73,
+ X86_SSE_SHR = 2,
+ X86_SSE_SAR = 4,
+ X86_SSE_SHL = 6,
+
+ X86_SSE_PSRLW_REG = 0xD1,
+ X86_SSE_PSRAW_REG = 0xE1,
+ X86_SSE_PSLLW_REG = 0xF1,
+
+ X86_SSE_PSRLD_REG = 0xD2,
+ X86_SSE_PSRAD_REG = 0xE2,
+ X86_SSE_PSLLD_REG = 0xF2,
+
+ X86_SSE_PSRLQ_REG = 0xD3,
+ X86_SSE_PSLLQ_REG = 0xF3,
+
+ X86_SSE_PREFETCH = 0x18,
+ X86_SSE_MOVNTPS = 0x2B,
+ X86_SSE_MOVHPD_REG_MEMBASE = 0x16,
+ X86_SSE_MOVHPD_MEMBASE_REG = 0x17,
+
+ X86_SSE_MOVSD_REG_MEMBASE = 0x10,
+ X86_SSE_MOVSD_MEMBASE_REG = 0x11,
+
+ X86_SSE_PINSRB = 0x20,/*sse41*/
+ X86_SSE_PINSRW = 0xC4,
+ X86_SSE_PINSRD = 0x22,/*sse41*/
+
+ X86_SSE_PEXTRB = 0x14,/*sse41*/
+ X86_SSE_PEXTRW = 0xC5,
+ X86_SSE_PEXTRD = 0x16,/*sse41*/
+
+ X86_SSE_SHUFP = 0xC6,
+
+ X86_SSE_CVTDQ2PD = 0xE6,
+ X86_SSE_CVTDQ2PS = 0x5B,
+ X86_SSE_CVTPD2DQ = 0xE6,
+ X86_SSE_CVTPD2PS = 0x5A,
+ X86_SSE_CVTPS2DQ = 0x5B,
+ X86_SSE_CVTPS2PD = 0x5A,
+ X86_SSE_CVTTPD2DQ = 0xE6,
+ X86_SSE_CVTTPS2DQ = 0x5B,
+} X86_SSE_Opcode;
+
+
+/* minimal SSE* support */
+/* movsd xmm, [basereg+disp]: F2 0F 10 /r */
+#define x86_movsd_reg_membase(inst,dreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf2; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ } while (0)
+
+/* cvttsd2si r32, xmm (truncating double->int): F2 0F 2C /r */
+#define x86_cvttsd2si(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xf2; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x2c; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+/* Core two-byte SSE op: 0F <opc> /r with no mandatory prefix. */
+#define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_reg_membase(inst,opc,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/* Same, with a trailing 8-bit immediate (e.g. shuffles, compares). */
+#define x86_sse_alu_reg_reg_imm8(inst,opc,dreg,reg, imm8) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)(imm8); \
+ } while (0)
+
+/* 'pd' variants prepend the 0x66 operand-size prefix (packed double / */
+/* integer SSE2 forms of the same opcode byte). */
+#define x86_sse_alu_pd_reg_reg_imm8(inst,opc,dreg,reg, imm8) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_reg_imm8 ((inst), (opc), (dreg), (reg), (imm8)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_pd_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_membase(inst,opc,dreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg_imm(inst,opc,dreg,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ x86_sse_alu_pd_reg_reg ((inst), (opc), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)(imm); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_membase_imm(inst,opc,dreg,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \
+ x86_sse_alu_pd_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \
+ *(inst)++ = (unsigned char)(imm); \
+ } while (0)
+
+
+/* 'ps' (packed single) forms take no mandatory prefix. */
+#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_ps_reg_reg_imm(inst,opc,dreg,reg, imm) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)imm; \
+ } while (0)
+
+
+/* 'sd' (scalar double) forms use the F2 prefix. */
+#define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xF2; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_sd_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xF2; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+
+/* 'ss' (scalar single) forms use the F3 prefix. */
+#define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xF3; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_ss_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xF3; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+
+
+/* SSE4.1 three-byte form: 66 0F 38 <opc> /r. */
+#define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)0x38; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+/* movups load/store (0F 10 / 0F 11): unaligned packed single. */
+#define x86_movups_reg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movups_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/* movaps load/store/move (0F 28 / 0F 29): aligned packed single. */
+#define x86_movaps_reg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x28; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x29; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_reg_reg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x28; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ } while (0)
+
+
+/* movd r32 <- xmm: 66 0F 7E /r (note reversed reg_emit operand order: */
+/* the xmm register goes in the ModRM reg field). */
+#define x86_movd_reg_xreg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x7e; \
+ x86_reg_emit ((inst), (sreg), (dreg)); \
+ } while (0)
+
+/* movd xmm <- r32: 66 0F 6E /r */
+#define x86_movd_xreg_reg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x6e; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ } while (0)
+
+/* movd xmm <- m32: 66 0F 6E /r */
+#define x86_movd_xreg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x6e; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * Shuffle 16-bit words: emits pshufhw (F3 0F 70) when 'high_words'
+ * is non-zero, pshuflw (F2 0F 70) otherwise; 'mask' is the 8-bit
+ * shuffle-control immediate.
+ * Fix: the cast previously bound only to 'high_words', truncating it
+ * to 8 bits before the truthiness test, so a value like 256 selected
+ * the wrong prefix. Cast the selected opcode byte instead, and
+ * parenthesize the macro arguments.
+ */
+#define x86_pshufw_reg_reg(inst,dreg,sreg,mask,high_words) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)((high_words) ? 0xF3 : 0xF2); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x70; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ *(inst)++ = (unsigned char)(mask); \
+ } while (0)
+
+/* Immediate-count packed shift: 66 0F <opc> /mode imm8, where 'mode' */
+/* (X86_SSE_SHR/SAR/SHL) is encoded in the ModRM reg field via the */
+/* dreg slot of x86_sse_alu_pd_reg_reg. */
+#define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+/* Register-count packed shift: 66 0F <opc> /r. */
+#define x86_sse_shift_reg_reg(inst,opc,dreg,sreg) \
+ do { \
+ x86_sse_alu_pd_reg_reg (inst, opc, dreg, sreg); \
+ } while (0)
+
+
+
+#endif // X86_H
+