[AMDGPU] Preliminary patch for divergence driven instruction selection. Operands...
author Alexander Timofeev <Alexander.Timofeev@amd.com>
Thu, 30 Aug 2018 13:55:04 +0000 (13:55 +0000)
committer Alexander Timofeev <Alexander.Timofeev@amd.com>
Thu, 30 Aug 2018 13:55:04 +0000 (13:55 +0000)
Reviewers: rampitec

Differential revision: https://reviews.llvm.org/D51316

llvm-svn: 341068
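
What the change does, in short: when an immediate is about to be folded into a
COPY whose source is an SGPR and whose destination is a VGPR, foldOperand() now
first retries the fold against every user of the copy's VGPR destination, so
those users can read the scalar source directly instead of through the copy.
An illustration, using the input MIR from the test added below:

    %8:sreg_32_xm0 = S_MOV_B32 65535
    %9:vgpr_32 = COPY %8
    %10:vgpr_32 = V_AND_B32_e32 %7, %9, implicit $exec

After si-fold-operands, the CHECK lines expect V_AND_B32_e32 to consume the
S_MOV_B32 result directly, leaving the intermediate VGPR copy dead.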

llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir [new file with mode: 0644]

diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index eed9a4d..d4b64ab 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -438,8 +438,6 @@ void SIFoldOperands::foldOperand(
 
   bool FoldingImm = OpToFold.isImm();
 
-  // In order to fold immediates into copies, we need to change the
-  // copy to a MOV.
   if (FoldingImm && UseMI->isCopy()) {
     unsigned DestReg = UseMI->getOperand(0).getReg();
     const TargetRegisterClass *DestRC
@@ -447,6 +445,31 @@ void SIFoldOperands::foldOperand(
       MRI->getRegClass(DestReg) :
       TRI->getPhysRegClass(DestReg);
 
+    unsigned SrcReg = UseMI->getOperand(1).getReg();
+    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
+        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
+      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
+        MachineRegisterInfo::use_iterator NextUse;
+        SmallVector<FoldCandidate, 4> CopyUses;
+        for (MachineRegisterInfo::use_iterator
+               Use = MRI->use_begin(DestReg), E = MRI->use_end();
+             Use != E; Use = NextUse) {
+          NextUse = std::next(Use);
+          FoldCandidate FC = FoldCandidate(Use->getParent(),
+              Use.getOperandNo(), &UseMI->getOperand(1));
+          CopyUses.push_back(FC);
+        }
+        for (auto &F : CopyUses) {
+          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
+                      FoldList, CopiesToReplace);
+        }
+      }
+    }
+
+    // In order to fold immediates into copies, we need to change the
+    // copy to a MOV.
+
     unsigned MovOp = TII->getMovOpcode(DestRC);
     if (MovOp == AMDGPU::COPY)
       return;
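
A note on the shape of the added loop: the candidates are collected into
CopyUses first and folded in a second pass because foldOperand() mutates the
use list of DestReg, which would invalidate a live use_iterator; NextUse is
computed before each iteration body for the same reason. The pre-existing path
below is untouched: an immediate feeding a copy still turns the copy into the
MOV returned by getMovOpcode().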
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
new file mode 100644
index 0000000..bcdf466
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-copy.mir
@@ -0,0 +1,22 @@
+# RUN: llc -march=amdgcn -run-pass si-fold-operands %s -o - | FileCheck -check-prefix=GCN %s
+
+# GCN-LABEL:       name: fold-imm-copy
+# GCN:             [[SREG:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 65535
+# GCN:             V_AND_B32_e32 [[SREG]]
+
+name: fold-imm-copy
+body:             |
+  bb.0:
+    liveins: $vgpr0, $sgpr0_sgpr1
+    %0:vgpr_32 = COPY $vgpr0
+    %1:sgpr_64 = COPY $sgpr0_sgpr1
+    %2:sreg_128 = S_LOAD_DWORDX4_IMM %1, 9, 0
+    %3:sreg_32_xm0 = S_MOV_B32 2
+    %4:vgpr_32 = V_LSHLREV_B32_e64 killed %3, %0, implicit $exec
+    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %6:vreg_64 = REG_SEQUENCE killed %4, %subreg.sub0, killed %5, %subreg.sub1
+    %7:vgpr_32 = BUFFER_LOAD_DWORD_ADDR64 %6, %2, 0, 4, 0, 0, 0, implicit $exec
+    %8:sreg_32_xm0 = S_MOV_B32 65535
+    %9:vgpr_32 = COPY %8
+    %10:vgpr_32 = V_AND_B32_e32 %7, %9, implicit $exec
+...
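
For reference, the GCN checks above pin down the intended output: after
si-fold-operands the AND reads the scalar register as its first source
operand. A sketch of the expected result (only the two checked instructions
are shown; the test does not assert whether the leftover COPY is erased):

    %8:sreg_32_xm0 = S_MOV_B32 65535
    %10:vgpr_32 = V_AND_B32_e32 %8, %7, implicit $exec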