From 5f8419da34393de830e8d53c93253159cc9a2e3f Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 22 Aug 2016 07:38:50 +0000
Subject: [PATCH] [X86] Create a new instruction format to handle 4VOp3
 encoding.

This saves one bit in TSFlags and simplifies MRMSrcMem/MRMSrcReg format
handling.

llvm-svn: 279424
---
 llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h   | 25 ++++++---
 .../Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 63 ++++++++++++++++------
 llvm/lib/Target/X86/X86InstrFormats.td           | 35 ++++++------
 llvm/lib/Target/X86/X86InstrInfo.td              |  8 +--
 llvm/lib/Target/X86/X86InstrSSE.td               |  8 +--
 llvm/lib/Target/X86/X86InstrShiftRotate.td       | 20 +++----
 llvm/lib/Target/X86/X86InstrXOP.td               |  8 +--
 llvm/utils/TableGen/X86RecognizableInstr.cpp     | 41 ++++++------
 llvm/utils/TableGen/X86RecognizableInstr.h       |  2 -
 9 files changed, 128 insertions(+), 82 deletions(-)

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 2751b6a..6e7bdfe 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -277,10 +277,15 @@ namespace X86II {
     ///
     MRMSrcMem = 33,
 
+    /// MRMSrcMem4VOp3 - This form is used for instructions that encode
+    /// operand 3 with VEX.VVVV and load from memory.
+    ///
+    MRMSrcMem4VOp3 = 34,
+
     /// MRMSrcMemOp4 - This form is used for instructions that use the Mod/RM
     /// byte to specify the fourth source, which in this case is memory.
     ///
-    MRMSrcMemOp4 = 34,
+    MRMSrcMemOp4 = 35,
 
     /// MRMXm - This form is used for instructions that use the Mod/RM byte
     /// to specify a memory source, but doesn't use the middle field.
@@ -301,10 +306,15 @@ namespace X86II {
     ///
     MRMSrcReg = 49,
 
+    /// MRMSrcReg4VOp3 - This form is used for instructions that encode
+    /// operand 3 with VEX.VVVV and do not load from memory.
+    ///
+    MRMSrcReg4VOp3 = 50,
+
     /// MRMSrcRegOp4 - This form is used for instructions that use the Mod/RM
     /// byte to specify the fourth source, which in this case is a register.
     ///
-    MRMSrcRegOp4 = 50,
+    MRMSrcRegOp4 = 51,
 
     /// MRMXr - This form is used for instructions that use the Mod/RM byte
     /// to specify a register source, but doesn't use the middle field.
@@ -505,16 +515,11 @@ namespace X86II {
     VEX_4VShift = VEX_WShift + 1,
     VEX_4V = 1ULL << VEX_4VShift,
 
-    /// VEX_4VOp3 - Similar to VEX_4V, but used on instructions that encode
-    /// operand 3 with VEX.vvvv.
-    VEX_4VOp3Shift = VEX_4VShift + 1,
-    VEX_4VOp3 = 1ULL << VEX_4VOp3Shift,
-
     /// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current
     /// instruction uses 256-bit wide registers. This is usually auto detected
    /// if a VR256 register is used, but some AVX instructions also have this
     /// field marked when using a f256 memory references.
-    VEX_LShift = VEX_4VOp3Shift + 1,
+    VEX_LShift = VEX_4VShift + 1,
     VEX_L = 1ULL << VEX_LShift,
 
     // EVEX_K - Set if this instruction requires masking
@@ -674,11 +679,15 @@ namespace X86II {
       // Start from 1, skip any registers encoded in VEX_VVVV or I8IMM, or a
       // mask register.
       return 1 + HasVEX_4V + HasEVEX_K;
+    case X86II::MRMSrcMem4VOp3:
+      // Skip registers encoded in reg.
+      return 1 + HasEVEX_K;
     case X86II::MRMSrcMemOp4:
      // Skip registers encoded in reg, VEX_VVVV, and I8IMM.
      return 3;
    case X86II::MRMDestReg:
    case X86II::MRMSrcReg:
+   case X86II::MRMSrcReg4VOp3:
    case X86II::MRMSrcRegOp4:
    case X86II::MRMXr:
    case X86II::MRM0r: case X86II::MRM1r:
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 05b905f..0b59a69 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -602,7 +602,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
   uint64_t Encoding = TSFlags & X86II::EncodingMask;
   bool HasEVEX_K = TSFlags & X86II::EVEX_K;
   bool HasVEX_4V = TSFlags & X86II::VEX_4V;
-  bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
   bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
 
   // VEX_R: opcode externsion equivalent to REX.R in
@@ -768,13 +767,20 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
     if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
       EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
 
-    if (HasVEX_4VOp3)
-      // Instruction format for 4VOp3:
-      //   src1(ModR/M), MemAddr, src3(VEX_4V)
-      // CurOp points to start of the MemoryOperand,
-      // it skips TIED_TO operands if exist, then increments past src1.
-      // CurOp + X86::AddrNumOperands will point to src3.
-      VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
+    break;
+  }
+  case X86II::MRMSrcMem4VOp3: {
+    // Instruction format for 4VOp3:
+    //   src1(ModR/M), MemAddr, src3(VEX_4V)
+    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+    VEX_R = ~(RegEnc >> 3) & 1;
+
+    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+    VEX_B = ~(BaseRegEnc >> 3) & 1;
+    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+    VEX_X = ~(IndexRegEnc >> 3) & 1;
+
+    VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
     break;
   }
   case X86II::MRMSrcMemOp4: {
@@ -837,8 +843,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
     RegEnc = getX86RegEncoding(MI, CurOp++);
     VEX_B = ~(RegEnc >> 3) & 1;
     VEX_X = ~(RegEnc >> 4) & 1;
-    if (HasVEX_4VOp3)
-      VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
+
     if (EVEX_b) {
       if (HasEVEX_RC) {
         unsigned RcOperand = NumOps-1;
@@ -849,6 +854,18 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
     }
     break;
   }
+  case X86II::MRMSrcReg4VOp3: {
+    // Instruction format for 4VOp3:
+    //   src1(ModR/M), src2(ModR/M), src3(VEX_4V)
+    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+    VEX_R = ~(RegEnc >> 3) & 1;
+
+    RegEnc = getX86RegEncoding(MI, CurOp++);
+    VEX_B = ~(RegEnc >> 3) & 1;
+
+    VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
+    break;
+  }
   case X86II::MRMSrcRegOp4: {
     // dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
     unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
@@ -1157,7 +1174,6 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
   // It uses the VEX.VVVV field?
   bool HasVEX_4V = TSFlags & X86II::VEX_4V;
-  bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
   bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;
 
   // It uses the EVEX.aaa field?
@@ -1337,8 +1353,6 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
     EmitRegModRMByte(MI.getOperand(SrcRegNum),
                      GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
     CurOp = SrcRegNum + 1;
-    if (HasVEX_4VOp3)
-      ++CurOp;
     if (HasVEX_I8Reg)
       I8RegNum = getX86RegEncoding(MI, CurOp++);
     // do not count the rounding control operand
@@ -1346,6 +1360,16 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
       --NumOps;
     break;
   }
+  case X86II::MRMSrcReg4VOp3: {
+    EmitByte(BaseOpcode, CurByte, OS);
+    unsigned SrcRegNum = CurOp + 1;
+
+    EmitRegModRMByte(MI.getOperand(SrcRegNum),
+                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
+    CurOp = SrcRegNum + 1;
+    ++CurOp; // Encoded in VEX.VVVV
+    break;
+  }
   case X86II::MRMSrcRegOp4: {
     EmitByte(BaseOpcode, CurByte, OS);
     unsigned SrcRegNum = CurOp + 1;
@@ -1376,12 +1400,21 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
     emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                      TSFlags, Rex, CurByte, OS, Fixups, STI);
     CurOp = FirstMemOp + X86::AddrNumOperands;
-    if (HasVEX_4VOp3)
-      ++CurOp;
     if (HasVEX_I8Reg)
       I8RegNum = getX86RegEncoding(MI, CurOp++);
     break;
   }
+  case X86II::MRMSrcMem4VOp3: {
+    unsigned FirstMemOp = CurOp+1;
+
+    EmitByte(BaseOpcode, CurByte, OS);
+
+    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
+                     TSFlags, Rex, CurByte, OS, Fixups, STI);
+    CurOp = FirstMemOp + X86::AddrNumOperands;
+    ++CurOp; // Encoded in VEX.VVVV.
+    break;
+  }
   case X86II::MRMSrcMemOp4: {
     unsigned FirstMemOp = CurOp+1;
diff --git a/llvm/lib/Target/X86/X86InstrFormats.td b/llvm/lib/Target/X86/X86InstrFormats.td
index b790a3d..610756a 100644
--- a/llvm/lib/Target/X86/X86InstrFormats.td
+++ b/llvm/lib/Target/X86/X86InstrFormats.td
@@ -27,16 +27,18 @@ def RawFrmDst : Format<5>;
 def RawFrmDstSrc : Format<6>;
 def RawFrmImm8 : Format<7>;
 def RawFrmImm16 : Format<8>;
-def MRMDestMem   : Format<32>;
-def MRMSrcMem    : Format<33>;
-def MRMSrcMemOp4 : Format<34>;
+def MRMDestMem     : Format<32>;
+def MRMSrcMem      : Format<33>;
+def MRMSrcMem4VOp3 : Format<34>;
+def MRMSrcMemOp4   : Format<35>;
 def MRMXm : Format<39>;
 def MRM0m : Format<40>; def MRM1m : Format<41>; def MRM2m : Format<42>;
 def MRM3m : Format<43>; def MRM4m : Format<44>; def MRM5m : Format<45>;
 def MRM6m : Format<46>; def MRM7m : Format<47>;
-def MRMDestReg   : Format<48>;
-def MRMSrcReg    : Format<49>;
-def MRMSrcRegOp4 : Format<50>;
+def MRMDestReg     : Format<48>;
+def MRMSrcReg      : Format<49>;
+def MRMSrcReg4VOp3 : Format<50>;
+def MRMSrcRegOp4   : Format<51>;
 def MRMXr : Format<55>;
 def MRM0r : Format<56>; def MRM1r : Format<57>; def MRM2r : Format<58>;
 def MRM3r : Format<59>; def MRM4r : Format<60>; def MRM5r : Format<61>;
@@ -199,7 +201,6 @@ class TAXD : TA { Prefix OpPrefix = XD; }
 class VEX { Encoding OpEnc = EncVEX; }
 class VEX_W { bit hasVEX_WPrefix = 1; }
 class VEX_4V : VEX { bit hasVEX_4V = 1; }
-class VEX_4VOp3 : VEX { bit hasVEX_4VOp3 = 1; }
 class VEX_L { bit hasVEX_L = 1; }
 class VEX_LIG { bit ignoresVEX_L = 1; }
 class EVEX : VEX { Encoding OpEnc = EncEVEX; }
@@ -222,7 +223,6 @@ class EVEX_CD8<int esize, CD8VForm form> {
 class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; }
 class XOP { Encoding OpEnc = EncXOP; }
 class XOP_4V : XOP { bit hasVEX_4V = 1; }
-class XOP_4VOp3 : XOP { bit hasVEX_4VOp3 = 1; }
 
 class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
               string AsmStr,
@@ -272,8 +272,6 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
   bits<2> OpEncBits = OpEnc.Value;
   bit hasVEX_WPrefix = 0;   // Does this inst set the VEX_W field?
   bit hasVEX_4V = 0;        // Does this inst require the VEX.VVVV field?
-  bit hasVEX_4VOp3 = 0;     // Does this inst require the VEX.VVVV field to
-                            // encode the third operand?
   bit hasVEX_L = 0;         // Does this inst use large (256-bit) registers?
   bit ignoresVEX_L = 0;     // Does this instruction ignore the L-bit
   bit hasEVEX_K = 0;        // Does this inst require masking?
@@ -321,16 +319,15 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
   let TSFlags{38-31} = Opcode;
   let TSFlags{39} = hasVEX_WPrefix;
   let TSFlags{40} = hasVEX_4V;
-  let TSFlags{41} = hasVEX_4VOp3;
-  let TSFlags{42} = hasVEX_L;
-  let TSFlags{43} = hasEVEX_K;
-  let TSFlags{44} = hasEVEX_Z;
-  let TSFlags{45} = hasEVEX_L2;
-  let TSFlags{46} = hasEVEX_B;
+  let TSFlags{41} = hasVEX_L;
+  let TSFlags{42} = hasEVEX_K;
+  let TSFlags{43} = hasEVEX_Z;
+  let TSFlags{44} = hasEVEX_L2;
+  let TSFlags{45} = hasEVEX_B;
   // If we run out of TSFlags bits, it's possible to encode this in 3 bits.
-  let TSFlags{53-47} = CD8_Scale;
-  let TSFlags{54} = has3DNow0F0FOpcode;
-  let TSFlags{55} = hasEVEX_RC;
+  let TSFlags{52-46} = CD8_Scale;
+  let TSFlags{53} = has3DNow0F0FOpcode;
+  let TSFlags{54} = hasEVEX_RC;
 }
 
 class PseudoI<dag oops, dag iops, list<dag> pattern>
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 78e39f2..4a890e7 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -2254,14 +2254,14 @@ let Predicates = [HasBMI] in {
 multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
                           X86MemOperand x86memop, Intrinsic Int,
                           PatFrag ld_frag> {
-  def rr : I,
-           T8PS, VEX_4VOp3;
-  def rm : I, T8PS, VEX_4VOp3;
+             (implicit EFLAGS)]>, T8PS, VEX;
 }
 
 let Predicates = [HasBMI], Defs = [EFLAGS] in {
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index aad9132..72879ea 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -8696,16 +8696,16 @@ let Predicates = [HasAVX2, NoVLX] in {
 // VGATHER - GATHER Operations
 multiclass avx2_gather<bits<8> opc, string OpcodeStr, RegisterClass RC256,
                        X86MemOperand memop128, X86MemOperand memop256> {
-  def rm : AVX28I, VEX_4VOp3;
-  def Yrm : AVX28I, VEX;
+  def Yrm : AVX28I, VEX_4VOp3, VEX_L;
+            []>, VEX, VEX_L;
 }
 
 let mayLoad = 1, hasSideEffects = 0, Constraints
diff --git a/llvm/lib/Target/X86/X86InstrShiftRotate.td b/llvm/lib/Target/X86/X86InstrShiftRotate.td
index c1df978..3b66068 100644
--- a/llvm/lib/Target/X86/X86InstrShiftRotate.td
+++ b/llvm/lib/Target/X86/X86InstrShiftRotate.td
@@ -873,19 +873,19 @@ let hasSideEffects = 0 in {
 multiclass bmi_shift<string asm, RegisterClass RC, X86MemOperand x86memop> {
 let hasSideEffects = 0 in {
-  def rr : I<0xF7, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
+  def rr : I<0xF7, MRMSrcReg4VOp3, (outs RC:$dst), (ins RC:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
-           VEX_4VOp3, Sched<[WriteShift]>;
+           VEX, Sched<[WriteShift]>;
   let mayLoad = 1 in
-  def rm : I<0xF7, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
+  def rm : I<0xF7, MRMSrcMem4VOp3,
+             (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
-           VEX_4VOp3,
-           Sched<[WriteShiftLd,
-                  // x86memop:$src1
-                  ReadDefault, ReadDefault, ReadDefault, ReadDefault,
-                  ReadDefault,
-                  // RC:$src1
-                  ReadAfterLd]>;
+           VEX, Sched<[WriteShiftLd,
+                       // x86memop:$src1
+                       ReadDefault, ReadDefault, ReadDefault, ReadDefault,
+                       ReadDefault,
+                       // RC:$src1
+                       ReadAfterLd]>;
 }
 }
diff --git a/llvm/lib/Target/X86/X86InstrXOP.td b/llvm/lib/Target/X86/X86InstrXOP.td
index d498797..b960164 100644
--- a/llvm/lib/Target/X86/X86InstrXOP.td
+++ b/llvm/lib/Target/X86/X86InstrXOP.td
@@ -85,12 +85,12 @@ let ExeDomain = SSEPackedDouble in {
 
 multiclass xop3op<bits<8> opc, string OpcodeStr, SDNode OpNode,
                   ValueType vt128> {
-  def rr : IXOP,
-           XOP_4VOp3, Sched<[WriteVarVecShift]>;
+           XOP, Sched<[WriteVarVecShift]>;
   def rm : IXOP opc, string OpcodeStr, SDNode OpNode,
             (vt128 (OpNode (vt128 VR128:$src1),
             (vt128 (bitconvert (loadv2i64 addr:$src2))))))]>,
            XOP_4V, VEX_W, Sched<[WriteVarVecShift, ReadAfterLd]>;
-  def mr : IXOP,
-           XOP_4VOp3, Sched<[WriteVarVecShift, ReadAfterLd]>;
+           XOP, Sched<[WriteVarVecShift, ReadAfterLd]>;
 }
 
 let ExeDomain = SSEPackedInt in {
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.cpp b/llvm/utils/TableGen/X86RecognizableInstr.cpp
index c30ef65..4736c4e 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.cpp
+++ b/llvm/utils/TableGen/X86RecognizableInstr.cpp
@@ -100,15 +100,17 @@ namespace X86Local {
     RawFrmDstSrc = 6,
     RawFrmImm8 = 7,
     RawFrmImm16 = 8,
-    MRMDestMem   = 32,
-    MRMSrcMem    = 33,
-    MRMSrcMemOp4 = 34,
+    MRMDestMem     = 32,
+    MRMSrcMem      = 33,
+    MRMSrcMem4VOp3 = 34,
+    MRMSrcMemOp4   = 35,
     MRMXm = 39,
     MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43,
     MRM4m = 44, MRM5m = 45, MRM6m = 46, MRM7m = 47,
-    MRMDestReg   = 48,
-    MRMSrcReg    = 49,
-    MRMSrcRegOp4 = 50,
+    MRMDestReg     = 48,
+    MRMSrcReg      = 49,
+    MRMSrcReg4VOp3 = 50,
+    MRMSrcRegOp4   = 51,
     MRMXr = 55,
     MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59,
     MRM4r = 60, MRM5r = 61, MRM6r = 62, MRM7r = 63,
@@ -201,7 +203,6 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables,
   AdSize = byteFromRec(Rec, "AdSizeBits");
   HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix");
   HasVEX_4V = Rec->getValueAsBit("hasVEX_4V");
-  HasVEX_4VOp3 = Rec->getValueAsBit("hasVEX_4VOp3");
   HasVEX_WPrefix = Rec->getValueAsBit("hasVEX_WPrefix");
   IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L");
   HasEVEX_L2Prefix = Rec->getValueAsBit("hasEVEX_L2");
@@ -553,7 +554,7 @@ void RecognizableInstr::emitInstructionSpecifier() {
   // Given the set of prefix bits, how many additional operands does the
   // instruction have?
   unsigned additionalOperands = 0;
-  if (HasVEX_4V || HasVEX_4VOp3)
+  if (HasVEX_4V)
     ++additionalOperands;
   if (HasEVEX_K)
     ++additionalOperands;
@@ -655,14 +656,17 @@ void RecognizableInstr::emitInstructionSpecifier() {
     HANDLE_OPERAND(vvvvRegister)
 
     HANDLE_OPERAND(rmRegister)
-
-    if (HasVEX_4VOp3)
-      HANDLE_OPERAND(vvvvRegister)
-
     HANDLE_OPTIONAL(immediate)
     HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
     HANDLE_OPTIONAL(immediate)
     break;
+  case X86Local::MRMSrcReg4VOp3:
+    assert(numPhysicalOperands == 3 &&
+           "Unexpected number of operands for MRMSrcRegFrm");
+    HANDLE_OPERAND(roRegister)
+    HANDLE_OPERAND(rmRegister)
+    HANDLE_OPERAND(vvvvRegister)
+    break;
   case X86Local::MRMSrcRegOp4:
     assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
           "Unexpected number of operands for MRMSrcRegOp4Frm");
@@ -693,13 +697,16 @@ void RecognizableInstr::emitInstructionSpecifier() {
     HANDLE_OPERAND(vvvvRegister)
 
     HANDLE_OPERAND(memory)
-
-    if (HasVEX_4VOp3)
-      HANDLE_OPERAND(vvvvRegister)
-
     HANDLE_OPTIONAL(immediate)
     HANDLE_OPTIONAL(immediate) // above might be a register in 7:4
     break;
+  case X86Local::MRMSrcMem4VOp3:
+    assert(numPhysicalOperands == 3 &&
+           "Unexpected number of operands for MRMSrcMemFrm");
+    HANDLE_OPERAND(roRegister)
+    HANDLE_OPERAND(memory)
+    HANDLE_OPERAND(vvvvRegister)
+    break;
   case X86Local::MRMSrcMemOp4:
     assert(numPhysicalOperands >= 4 && numPhysicalOperands <= 5 &&
           "Unexpected number of operands for MRMSrcMemOp4Frm");
@@ -853,12 +860,14 @@ void RecognizableInstr::emitDecodePath(DisassemblerTables &tables) const {
     break;
   case X86Local::MRMDestReg:
   case X86Local::MRMSrcReg:
+  case X86Local::MRMSrcReg4VOp3:
  case X86Local::MRMSrcRegOp4:
   case X86Local::MRMXr:
     filter = new ModFilter(true);
     break;
   case X86Local::MRMDestMem:
   case X86Local::MRMSrcMem:
+  case X86Local::MRMSrcMem4VOp3:
   case X86Local::MRMSrcMemOp4:
   case X86Local::MRMXm:
     filter = new ModFilter(false);
diff --git a/llvm/utils/TableGen/X86RecognizableInstr.h b/llvm/utils/TableGen/X86RecognizableInstr.h
index 402fd86..2e61158 100644
--- a/llvm/utils/TableGen/X86RecognizableInstr.h
+++ b/llvm/utils/TableGen/X86RecognizableInstr.h
@@ -55,8 +55,6 @@ private:
   bool HasREX_WPrefix;
   /// The hasVEX_4V field from the record
   bool HasVEX_4V;
-  /// The hasVEX_4VOp3 field from the record
-  bool HasVEX_4VOp3;
   /// The hasVEX_WPrefix field from the record
   bool HasVEX_WPrefix;
   /// Inferred from the operands; indicates whether the L bit in the VEX prefix is set
-- 
2.7.4
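
A minimal sketch of what this change means for code that consumes TSFlags: the
question "is operand 3 encoded in VEX.VVVV?" is no longer a separate bit but a
property of the format value itself. The snippet below is illustrative only and
is not taken from the LLVM tree; the FormMask width and the helper name
encodesOp3InVVVV are assumptions, while the format values match the enum added
by the patch.

    // Sketch only, assuming the format lives in the low bits of TSFlags
    // behind a FormMask, as X86BaseInfo.h arranges it.
    #include <cstdint>

    namespace sketch {

    enum : unsigned {
      MRMSrcMem4VOp3 = 34,  // new in this patch
      MRMSrcMemOp4   = 35,  // renumbered from 34
      MRMSrcReg4VOp3 = 50,  // new in this patch
      MRMSrcRegOp4   = 51   // renumbered from 50
    };

    constexpr uint64_t FormMask = 127; // assumed width of the format field

    // True if the instruction's third source operand is encoded in VEX.VVVV.
    inline bool encodesOp3InVVVV(uint64_t TSFlags) {
      uint64_t Form = TSFlags & FormMask;
      return Form == MRMSrcReg4VOp3 || Form == MRMSrcMem4VOp3;
    }

    } // namespace sketch

In the patch itself this shows up as dedicated MRMSrcReg4VOp3/MRMSrcMem4VOp3
cases in the encoder's switch on the format, replacing the old HasVEX_4VOp3
checks; that is what frees TSFlags bit 41 and lets VEX_L and the later fields
shift down by one.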