///
MRMSrcMem = 33,
+ /// MRMSrcMem4VOp3 - This form is used for instructions that encode
+ /// operand 3 with VEX.VVVV and load from memory.
+ ///
+ MRMSrcMem4VOp3 = 34,
+
/// MRMSrcMemOp4 - This form is used for instructions that use the Mod/RM
/// byte to specify the fourth source, which in this case is memory.
///
- MRMSrcMemOp4 = 34,
+ MRMSrcMemOp4 = 35,
/// MRMXm - This form is used for instructions that use the Mod/RM byte
/// to specify a memory source, but doesn't use the middle field.
///
MRMSrcReg = 49,
+ /// MRMSrcReg4VOp3 - This form is used for instructions that encode
+ /// operand 3 with VEX.VVVV and do not load from memory.
+ ///
+ MRMSrcReg4VOp3 = 50,
+
/// MRMSrcRegOp4 - This form is used for instructions that use the Mod/RM
/// byte to specify the fourth source, which in this case is a register.
///
- MRMSrcRegOp4 = 50,
+ MRMSrcRegOp4 = 51,
/// MRMXr - This form is used for instructions that use the Mod/RM byte
/// to specify a register source, but doesn't use the middle field.
VEX_4VShift = VEX_WShift + 1,
VEX_4V = 1ULL << VEX_4VShift,
- /// VEX_4VOp3 - Similar to VEX_4V, but used on instructions that encode
- /// operand 3 with VEX.vvvv.
- VEX_4VOp3Shift = VEX_4VShift + 1,
- VEX_4VOp3 = 1ULL << VEX_4VOp3Shift,
-
/// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
/// instruction uses 256-bit wide registers. This is usually auto detected
/// if a VR256 register is used, but some AVX instructions also have this
/// field marked when using a f256 memory references.
- VEX_LShift = VEX_4VOp3Shift + 1,
+ VEX_LShift = VEX_4VShift + 1,
VEX_L = 1ULL << VEX_LShift,
// EVEX_K - Set if this instruction requires masking
// Start from 1, skip any registers encoded in VEX_VVVV or I8IMM, or a
// mask register.
return 1 + HasVEX_4V + HasEVEX_K;
+ case X86II::MRMSrcMem4VOp3:
+ // Skip registers encoded in reg.
+ return 1 + HasEVEX_K;
case X86II::MRMSrcMemOp4:
// Skip registers encoded in reg, VEX_VVVV, and I8IMM.
return 3;
case X86II::MRMDestReg:
case X86II::MRMSrcReg:
+ case X86II::MRMSrcReg4VOp3:
case X86II::MRMSrcRegOp4:
case X86II::MRMXr:
case X86II::MRM0r: case X86II::MRM1r:
uint64_t Encoding = TSFlags & X86II::EncodingMask;
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
- bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
// VEX_R: opcode externsion equivalent to REX.R in
if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
- if (HasVEX_4VOp3)
- // Instruction format for 4VOp3:
- // src1(ModR/M), MemAddr, src3(VEX_4V)
- // CurOp points to start of the MemoryOperand,
- // it skips TIED_TO operands if exist, then increments past src1.
- // CurOp + X86::AddrNumOperands will point to src3.
- VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
+ break;
+ }
+ case X86II::MRMSrcMem4VOp3: {
+ // Instruction format for 4VOp3:
+ // src1(ModR/M), MemAddr, src3(VEX_4V)
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
+
+ VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
break;
}
case X86II::MRMSrcMemOp4: {
RegEnc = getX86RegEncoding(MI, CurOp++);
VEX_B = ~(RegEnc >> 3) & 1;
VEX_X = ~(RegEnc >> 4) & 1;
- if (HasVEX_4VOp3)
- VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
+
if (EVEX_b) {
if (HasEVEX_RC) {
unsigned RcOperand = NumOps-1;
}
break;
}
+ case X86II::MRMSrcReg4VOp3: {
+ // Instruction format for 4VOp3:
+ // src1(ModR/M), src2(ModR/M), src3(VEX_4V)
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+
+ RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+
+ VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
+ break;
+ }
case X86II::MRMSrcRegOp4: {
// dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
// It uses the VEX.VVVV field?
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
- bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
// It uses the EVEX.aaa field?
EmitRegModRMByte(MI.getOperand(SrcRegNum),
GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
CurOp = SrcRegNum + 1;
- if (HasVEX_4VOp3)
- ++CurOp;
if (HasVEX_I8Reg)
I8RegNum = getX86RegEncoding(MI, CurOp++);
// do not count the rounding control operand
--NumOps;
break;
}
+ case X86II::MRMSrcReg4VOp3: {
+ EmitByte(BaseOpcode, CurByte, OS);
+ unsigned SrcRegNum = CurOp + 1;
+
+ EmitRegModRMByte(MI.getOperand(SrcRegNum),
+ GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
+ CurOp = SrcRegNum + 1;
+ ++CurOp; // Encoded in VEX.VVVV
+ break;
+ }
case X86II::MRMSrcRegOp4: {
EmitByte(BaseOpcode, CurByte, OS);
unsigned SrcRegNum = CurOp + 1;
emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
TSFlags, Rex, CurByte, OS, Fixups, STI);
CurOp = FirstMemOp + X86::AddrNumOperands;
- if (HasVEX_4VOp3)
- ++CurOp;
if (HasVEX_I8Reg)
I8RegNum = getX86RegEncoding(MI, CurOp++);
break;
}
+ case X86II::MRMSrcMem4VOp3: {
+ unsigned FirstMemOp = CurOp+1;
+
+ EmitByte(BaseOpcode, CurByte, OS);
+
+ emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
+ TSFlags, Rex, CurByte, OS, Fixups, STI);
+ CurOp = FirstMemOp + X86::AddrNumOperands;
+ ++CurOp; // Encoded in VEX.VVVV.
+ break;
+ }
case X86II::MRMSrcMemOp4: {
unsigned FirstMemOp = CurOp+1;
def RawFrmDstSrc : Format<6>;
def RawFrmImm8 : Format<7>;
def RawFrmImm16 : Format<8>;
-def MRMDestMem : Format<32>;
-def MRMSrcMem : Format<33>;
-def MRMSrcMemOp4 : Format<34>;
+def MRMDestMem : Format<32>;
+def MRMSrcMem : Format<33>;
+def MRMSrcMem4VOp3 : Format<34>;
+def MRMSrcMemOp4 : Format<35>;
def MRMXm : Format<39>;
def MRM0m : Format<40>; def MRM1m : Format<41>; def MRM2m : Format<42>;
def MRM3m : Format<43>; def MRM4m : Format<44>; def MRM5m : Format<45>;
def MRM6m : Format<46>; def MRM7m : Format<47>;
-def MRMDestReg : Format<48>;
-def MRMSrcReg : Format<49>;
-def MRMSrcRegOp4 : Format<50>;
+def MRMDestReg : Format<48>;
+def MRMSrcReg : Format<49>;
+def MRMSrcReg4VOp3 : Format<50>;
+def MRMSrcRegOp4 : Format<51>;
def MRMXr : Format<55>;
def MRM0r : Format<56>; def MRM1r : Format<57>; def MRM2r : Format<58>;
def MRM3r : Format<59>; def MRM4r : Format<60>; def MRM5r : Format<61>;
class VEX { Encoding OpEnc = EncVEX; }
class VEX_W { bit hasVEX_WPrefix = 1; }
class VEX_4V : VEX { bit hasVEX_4V = 1; }
-class VEX_4VOp3 : VEX { bit hasVEX_4VOp3 = 1; }
class VEX_L { bit hasVEX_L = 1; }
class VEX_LIG { bit ignoresVEX_L = 1; }
class EVEX : VEX { Encoding OpEnc = EncEVEX; }
class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; }
class XOP { Encoding OpEnc = EncXOP; }
class XOP_4V : XOP { bit hasVEX_4V = 1; }
-class XOP_4VOp3 : XOP { bit hasVEX_4VOp3 = 1; }
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
string AsmStr,
bits<2> OpEncBits = OpEnc.Value;
bit hasVEX_WPrefix = 0; // Does this inst set the VEX_W field?
bit hasVEX_4V = 0; // Does this inst require the VEX.VVVV field?
- bit hasVEX_4VOp3 = 0; // Does this inst require the VEX.VVVV field to
- // encode the third operand?
bit hasVEX_L = 0; // Does this inst use large (256-bit) registers?
bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit
bit hasEVEX_K = 0; // Does this inst require masking?
let TSFlags{38-31} = Opcode;
let TSFlags{39} = hasVEX_WPrefix;
let TSFlags{40} = hasVEX_4V;
- let TSFlags{41} = hasVEX_4VOp3;
- let TSFlags{42} = hasVEX_L;
- let TSFlags{43} = hasEVEX_K;
- let TSFlags{44} = hasEVEX_Z;
- let TSFlags{45} = hasEVEX_L2;
- let TSFlags{46} = hasEVEX_B;
+ let TSFlags{41} = hasVEX_L;
+ let TSFlags{42} = hasEVEX_K;
+ let TSFlags{43} = hasEVEX_Z;
+ let TSFlags{44} = hasEVEX_L2;
+ let TSFlags{45} = hasEVEX_B;
// If we run out of TSFlags bits, it's possible to encode this in 3 bits.
- let TSFlags{53-47} = CD8_Scale;
- let TSFlags{54} = has3DNow0F0FOpcode;
- let TSFlags{55} = hasEVEX_RC;
+ let TSFlags{52-46} = CD8_Scale;
+ let TSFlags{53} = has3DNow0F0FOpcode;
+ let TSFlags{54} = hasEVEX_RC;
}
class PseudoI<dag oops, dag iops, list<dag> pattern>
multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
X86MemOperand x86memop, Intrinsic Int,
PatFrag ld_frag> {
- def rr : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ def rr : I<opc, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
- T8PS, VEX_4VOp3;
- def rm : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
+ T8PS, VEX;
+ def rm : I<opc, MRMSrcMem4VOp3, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
- (implicit EFLAGS)]>, T8PS, VEX_4VOp3;
+ (implicit EFLAGS)]>, T8PS, VEX;
}
let Predicates = [HasBMI], Defs = [EFLAGS] in {
// VGATHER - GATHER Operations
multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
X86MemOperand memop128, X86MemOperand memop256> {
- def rm : AVX28I<opc, MRMSrcMem, (outs VR128:$dst, VR128:$mask_wb),
+ def rm : AVX28I<opc, MRMSrcMem4VOp3, (outs VR128:$dst, VR128:$mask_wb),
(ins VR128:$src1, memop128:$src2, VR128:$mask),
!strconcat(OpcodeStr,
"\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
- []>, VEX_4VOp3;
- def Yrm : AVX28I<opc, MRMSrcMem, (outs RC256:$dst, RC256:$mask_wb),
+ []>, VEX;
+ def Yrm : AVX28I<opc, MRMSrcMem4VOp3, (outs RC256:$dst, RC256:$mask_wb),
(ins RC256:$src1, memop256:$src2, RC256:$mask),
!strconcat(OpcodeStr,
"\t{$mask, $src2, $dst|$dst, $src2, $mask}"),
- []>, VEX_4VOp3, VEX_L;
+ []>, VEX, VEX_L;
}
let mayLoad = 1, hasSideEffects = 0, Constraints
multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
let hasSideEffects = 0 in {
- def rr : I<0xF7, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+ def rr : I<0xF7, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
- VEX_4VOp3, Sched<[WriteShift]>;
+ VEX, Sched<[WriteShift]>;
let mayLoad = 1 in
- def rm : I<0xF7, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
+ def rm : I<0xF7, MRMSrcMem4VOp3,
+ (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
- VEX_4VOp3,
- Sched<[WriteShiftLd,
- // x86memop:$src1
- ReadDefault, ReadDefault, ReadDefault, ReadDefault,
- ReadDefault,
- // RC:$src1
- ReadAfterLd]>;
+ VEX, Sched<[WriteShiftLd,
+ // x86memop:$src1
+ ReadDefault, ReadDefault, ReadDefault, ReadDefault,
+ ReadDefault,
+ // RC:$src1
+ ReadAfterLd]>;
}
}
multiclass xop3op<bits<8> opc, string OpcodeStr, SDNode OpNode,
ValueType vt128> {
- def rr : IXOP<opc, MRMSrcReg, (outs VR128:$dst),
+ def rr : IXOP<opc, MRMSrcReg4VOp3, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode (vt128 VR128:$src1), (vt128 VR128:$src2))))]>,
- XOP_4VOp3, Sched<[WriteVarVecShift]>;
+ XOP, Sched<[WriteVarVecShift]>;
def rm : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, i128mem:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
(vt128 (OpNode (vt128 VR128:$src1),
(vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
XOP_4V, VEX_W, Sched<[WriteVarVecShift, ReadAfterLd]>;
- def mr : IXOP<opc, MRMSrcMem, (outs VR128:$dst),
+ def mr : IXOP<opc, MRMSrcMem4VOp3, (outs VR128:$dst),
(ins i128mem:$src1, VR128:$src2),
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set VR128:$dst,
(vt128 (OpNode (vt128 (bitconvert (loadv2i64 addr:$src1))),
(vt128 VR128:$src2))))]>,
- XOP_4VOp3, Sched<[WriteVarVecShift, ReadAfterLd]>;
+ XOP, Sched<[WriteVarVecShift, ReadAfterLd]>;
}
let ExeDomain = SSEPackedInt in {
RawFrmDstSrc = 6,
RawFrmImm8 = 7,
RawFrmImm16 = 8,
- MRMDestMem = 32,
- MRMSrcMem = 33,
- MRMSrcMemOp4 = 34,
+ MRMDestMem = 32,
+ MRMSrcMem = 33,
+ MRMSrcMem4VOp3 = 34,
+ MRMSrcMemOp4 = 35,
MRMXm = 39,
MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43,
MRM4m = 44, MRM5m = 45, MRM6m = 46, MRM7m = 47,
- MRMDestReg = 48,
- MRMSrcReg = 49,
- MRMSrcRegOp4 = 50,
+ MRMDestReg = 48,
+ MRMSrcReg = 49,
+ MRMSrcReg4VOp3 = 50,
+ MRMSrcRegOp4 = 51,
MRMXr = 55,
MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59,
MRM4r = 60, MRM5r = 61, MRM6r = 62, MRM7r = 63,
AdSize = byteFromRec(Rec, "AdSizeBits");
HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
- HasVEX_4VOp3 = Rec->getValueAsBit("hasVEX_4VOp3");
HasVEX_WPrefix = Rec->getValueAsBit("hasVEX_WPrefix");
IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
HasEVEX_L2Prefix = Rec->getValueAsBit("hasEVEX_L2");
// Given the set of prefix bits, how many additional operands does the
// instruction have?
unsigned additionalOperands = 0;
- if (HasVEX_4V || HasVEX_4VOp3)
+ if (HasVEX_4V)
++additionalOperands;
if (HasEVEX_K)
++additionalOperands;
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(rmRegister)
-
- if (HasVEX_4VOp3)
- HANDLE_OPERAND(vvvvRegister)
-
HANDLE_OPTIONAL(immediate)
HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
HANDLE_OPTIONAL(immediate)
break;
+ case X86Local::MRMSrcReg4VOp3:
+ assert(numPhysicalOperands == 3 &&
+          "Unexpected number of operands for MRMSrcReg4VOp3Frm");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(rmRegister)
+ HANDLE_OPERAND(vvvvRegister)
+ break;
case X86Local::MRMSrcRegOp4:
assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
"Unexpected number of operands for MRMSrcRegOp4Frm");
HANDLE_OPERAND(vvvvRegister)
HANDLE_OPERAND(memory)
-
- if (HasVEX_4VOp3)
- HANDLE_OPERAND(vvvvRegister)
-
HANDLE_OPTIONAL(immediate)
HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
break;
+ case X86Local::MRMSrcMem4VOp3:
+ assert(numPhysicalOperands == 3 &&
+          "Unexpected number of operands for MRMSrcMem4VOp3Frm");
+ HANDLE_OPERAND(roRegister)
+ HANDLE_OPERAND(memory)
+ HANDLE_OPERAND(vvvvRegister)
+ break;
case X86Local::MRMSrcMemOp4:
assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
"Unexpected number of operands for MRMSrcMemOp4Frm");
break;
case X86Local::MRMDestReg:
case X86Local::MRMSrcReg:
+ case X86Local::MRMSrcReg4VOp3:
case X86Local::MRMSrcRegOp4:
case X86Local::MRMXr:
filter = new ModFilter(true);
break;
case X86Local::MRMDestMem:
case X86Local::MRMSrcMem:
+ case X86Local::MRMSrcMem4VOp3:
case X86Local::MRMSrcMemOp4:
case X86Local::MRMXm:
filter = new ModFilter(false);
bool HasREX_WPrefix;
/// The hasVEX_4V field from the record
bool HasVEX_4V;
- /// The hasVEX_4VOp3 field from the record
- bool HasVEX_4VOp3;
/// The hasVEX_WPrefix field from the record
bool HasVEX_WPrefix;
/// Inferred from the operands; indicates whether the L bit in the VEX prefix is set