From 80f766a032fca529ddcb78d952ee882536223b3b Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Thu, 10 Sep 2015 01:23:28 +0000
Subject: [PATCH] AMDGPU/SI: Fix more cases of losing exec operands

llvm-svn: 247230
---
 llvm/lib/Target/AMDGPU/SIInstrFormats.td | 10 ++++------
 llvm/lib/Target/AMDGPU/SIInstrInfo.cpp   |  4 +---
 llvm/lib/Target/AMDGPU/SIInstructions.td | 14 +++++++-------
 3 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIInstrFormats.td b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
index d056212f..d92c7699 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrFormats.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
@@ -611,15 +611,13 @@ class VINTRPCommon <dag outs, dag ins, string asm, list<dag> pattern> :
 // Vector I/O operations
 //===----------------------------------------------------------------------===//
 
-let Uses = [EXEC] in {
-
 class DS <dag outs, dag ins, string asm, list<dag> pattern> :
     InstSI <outs, ins, asm, pattern> {
 
   let LGKM_CNT = 1;
   let DS = 1;
   let UseNamedOperandTable = 1;
-  let Uses = [M0];
+  let Uses = [M0, EXEC];
 
   // Most instruction load and store data, so set this as the default.
   let mayLoad = 1;
@@ -636,6 +634,7 @@ class MUBUF <dag outs, dag ins, string asm, list<dag> pattern> :
   let VM_CNT = 1;
   let EXP_CNT = 1;
   let MUBUF = 1;
+  let Uses = [EXEC];
 
   let hasSideEffects = 0;
   let UseNamedOperandTable = 1;
@@ -649,6 +648,7 @@ class MTBUF <dag outs, dag ins, string asm, list<dag> pattern> :
   let VM_CNT = 1;
   let EXP_CNT = 1;
   let MTBUF = 1;
+  let Uses = [EXEC];
 
   let hasSideEffects = 0;
   let UseNamedOperandTable = 1;
@@ -678,9 +678,7 @@ class MIMG <bits<7> op, dag outs, dag ins, string asm, list<dag> pattern> :
   let VM_CNT = 1;
   let EXP_CNT = 1;
   let MIMG = 1;
+  let Uses = [EXEC];
 
   let hasSideEffects = 0; // XXX ????
 }
-
-
-} // End Uses = [EXEC]
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index bf1c350..f013a1e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -82,6 +82,7 @@ bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
   switch (MI->getOpcode()) {
   case AMDGPU::V_MOV_B32_e32:
   case AMDGPU::V_MOV_B32_e64:
+  case AMDGPU::V_MOV_B64_PSEUDO:
     return true;
   default:
     return false;
@@ -996,9 +997,6 @@ bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
           AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
     }
 
-    UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
-                                                    AMDGPU::OpName::src2));
-
     // ChangingToImmediate adds Src2 back to the instruction.
     Src2->ChangeToImmediate(Imm);
     removeModOperands(*UseMI);
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index bd22e88..f34477e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1367,7 +1367,7 @@ defm V_RSQ_CLAMP_F64 : VOP1InstSI , "v_rsq_clamp_f64",
 // VINTRP Instructions
 //===----------------------------------------------------------------------===//
 
-let Uses = [M0] in {
+let Uses = [M0, EXEC] in {
 
 // FIXME: Specify SchedRW for VINTRP insturctions.
 
@@ -1412,7 +1412,7 @@ defm V_INTERP_MOV_F32 : VINTRP_m <
   [(set f32:$dst, (AMDGPUinterp_mov (i32 imm:$src0), (i32 imm:$attr_chan),
                                     (i32 imm:$attr)))]>;
 
-} // End Uses = [M0]
+} // End Uses = [M0, EXEC]
 //===----------------------------------------------------------------------===//
 // VOP2 Instructions
 //===----------------------------------------------------------------------===//
@@ -1769,7 +1769,7 @@ let SchedRW = [WriteDouble, WriteSALU] in {
 defm V_DIV_SCALE_F64 : VOP3b_64 , "v_div_scale_f64", []>;
 } // let SchedRW = [WriteDouble]
 
-let isCommutable = 1, Uses = [VCC] in {
+let isCommutable = 1, Uses = [VCC, EXEC] in {
 
 let SchedRW = [WriteFloatFMA] in {
 // v_div_fmas_f32:
@@ -1793,7 +1793,7 @@ defm V_DIV_FMAS_F64 : VOP3_VCC_Inst , "v_div_fmas_f64",
 >;
 } // End SchedRW = [WriteDouble]
 
-} // End isCommutable = 1
+} // End isCommutable = 1, Uses = [VCC, EXEC]
 
 //def V_MSAD_U8 : VOP3_U8 <0x00000171, "v_msad_u8", []>;
 //def V_QSAD_U8 : VOP3_U8 <0x00000172, "v_qsad_u8", []>;
@@ -1842,7 +1842,7 @@ def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$dst),
   (ins VSrc_64:$src0, VSrc_64:$src1, SSrc_64:$src2), "", []
 >;
 
-let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
 // 64-bit vector move instruction. This is mainly used by the SIFoldOperands
 // pass to enable folding of inline immediates.
 def V_MOV_B64_PSEUDO : InstSI <(outs VReg_64:$dst), (ins VSrc_64:$src0), "", []>;
@@ -1984,7 +1984,7 @@ def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST;
 
 multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
 
-  let UseNamedOperandTable = 1 in {
+  let UseNamedOperandTable = 1, Uses = [EXEC] in {
     def _SAVE : InstSI <
       (outs),
       (ins sgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc,
@@ -2016,7 +2016,7 @@ defm SI_SPILL_S256 : SI_SPILL_SGPR ;
 defm SI_SPILL_S512 : SI_SPILL_SGPR ;
 
 multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
-  let UseNamedOperandTable = 1, VGPRSpill = 1 in {
+  let UseNamedOperandTable = 1, VGPRSpill = 1, Uses = [EXEC] in {
   def _SAVE : InstSI <
     (outs),
     (ins vgpr_class:$src, i32imm:$frame_idx, SReg_128:$scratch_rsrc,
-- 
2.7.4
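
Illustrative note, outside the patch itself: every hunk above applies one idiom. Instructions that execute per lane under the EXEC mask (DS, MUBUF, MTBUF, MIMG, VINTRP, the VCC-using VOP3 forms, the move and spill pseudos) must list EXEC as an implicit use on their own definition. An enclosing "let Uses = [EXEC] in { ... }" block does not compose with an inner "let Uses = [M0]": the inner let overrides the field rather than appending to it, which is how the exec operand was being lost, so the fix spells out the full list in each class or block. A minimal TableGen sketch of the idiom; the class name and layout are hypothetical, only InstSI, M0, and EXEC come from the patch:

    // Hypothetical lane-masked instruction class, in the style of the DS
    // change above; InstSI is the real SI base instruction class.
    class MyLaneMaskedInst <dag outs, dag ins, string asm, list<dag> pattern> :
        InstSI <outs, ins, asm, pattern> {
      // List M0 and EXEC together: an outer `let Uses = [EXEC]` would be
      // overridden by this inner let, not merged with it, so the implicit
      // EXEC read must be stated here for later machine passes to see it.
      let Uses = [M0, EXEC];
    }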