AMDGPU/GlobalISel: Add equiv xform for bitcast_fpimm_to_i32
author: Matt Arsenault <Matthew.Arsenault@amd.com>
Tue, 7 Jan 2020 16:45:10 +0000 (11:45 -0500)
committer: Matt Arsenault <arsenm2@gmail.com>
Thu, 9 Jan 2020 15:29:31 +0000 (10:29 -0500)
Only partially fixes one pattern import.

llvm/lib/Target/AMDGPU/AMDGPUGISel.td
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.h

index 0583610..b198bfb 100644 (file)
@@ -207,3 +207,6 @@ def gi_as_i32timm : GICustomOperandRenderer<"renderTruncImm32">,
 
 def gi_NegateImm : GICustomOperandRenderer<"renderNegateImm">,
   GISDNodeXFormEquiv<NegateImm>;
+
+def gi_bitcast_fpimm_to_i32 : GICustomOperandRenderer<"renderBitcastImm">,
+  GISDNodeXFormEquiv<bitcast_fpimm_to_i32>;
index a632e7a..876c7c3 100644 (file)
@@ -2103,6 +2103,17 @@ void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
 }
 
+void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
+                                                 const MachineInstr &MI) const {
+  const MachineOperand &Op = MI.getOperand(1);
+  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
+    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
+  else {
+    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
+    MIB.addImm(Op.getCImm()->getSExtValue());
+  }
+}
+
 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
 }
index 8d44c58..1de8a0a 100644 (file)
@@ -172,6 +172,9 @@ private:
   void renderNegateImm(MachineInstrBuilder &MIB,
                        const MachineInstr &MI) const;
 
+  void renderBitcastImm(MachineInstrBuilder &MIB,
+                        const MachineInstr &MI) const;
+
   bool isInlineImmediate16(int64_t Imm) const;
   bool isInlineImmediate32(int64_t Imm) const;
   bool isInlineImmediate64(int64_t Imm) const;