From 4f6318fe1bff062adf3ee99d7c78f98573770df2 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Mon, 6 Nov 2017 17:04:37 +0000
Subject: [PATCH] AMDGPU: Select v_mad_u64_u32 and v_mad_i64_i32

llvm-svn: 317492
---
 llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp |  19 +++
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  29 +++--
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h   |   4 +
 llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h      |   4 +
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  47 ++++++-
 llvm/test/CodeGen/AMDGPU/mad_64_32.ll         | 168 ++++++++++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/mul.ll               | 125 ++++++++++---------
 7 files changed, 329 insertions(+), 67 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/mad_64_32.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index c313e4a..f04efd71 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -204,6 +204,7 @@ private:
   void SelectADD_SUB_I64(SDNode *N);
   void SelectUADDO_USUBO(SDNode *N);
   void SelectDIV_SCALE(SDNode *N);
+  void SelectMAD_64_32(SDNode *N);
   void SelectFMA_W_CHAIN(SDNode *N);
   void SelectFMUL_W_CHAIN(SDNode *N);
 
@@ -594,6 +595,11 @@ void AMDGPUDAGToDAGISel::Select(SDNode *N) {
     SelectDIV_SCALE(N);
     return;
   }
+  case AMDGPUISD::MAD_I64_I32:
+  case AMDGPUISD::MAD_U64_U32: {
+    SelectMAD_64_32(N);
+    return;
+  }
   case ISD::CopyToReg: {
     const SITargetLowering& Lowering =
         *static_cast<const SITargetLowering*>(getTargetLowering());
@@ -814,6 +820,19 @@ void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
   CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
 }
 
+// We need to handle this here because tablegen doesn't support matching
+// instructions with multiple outputs.
+void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
+  SDLoc SL(N);
+  bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
+  unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32 : AMDGPU::V_MAD_U64_U32;
+
+  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
+  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
+                    Clamp };
+  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
+}
+
 bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                          unsigned OffsetBits) const {
   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index fe2c933..af22d52 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -151,6 +151,22 @@ bool AMDGPUTargetLowering::isOrEquivalentToAdd(SelectionDAG &DAG, SDValue Op)
   return false;
 }
 
+unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
+  KnownBits Known;
+  EVT VT = Op.getValueType();
+  DAG.computeKnownBits(Op, Known);
+
+  return VT.getSizeInBits() - Known.countMinLeadingZeros();
+}
+
+unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
+  EVT VT = Op.getValueType();
+
+  // In order for this to be a signed 24-bit value, bit 23, must
+  // be a sign bit.
+  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
+}
+
 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                            const AMDGPUSubtarget &STI)
   : TargetLowering(TM), Subtarget(&STI) {
@@ -2615,21 +2631,14 @@ SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
 //===----------------------------------------------------------------------===//
 
 static bool isU24(SDValue Op, SelectionDAG &DAG) {
-  KnownBits Known;
-  EVT VT = Op.getValueType();
-  DAG.computeKnownBits(Op, Known);
-
-  return (VT.getSizeInBits() - Known.countMinLeadingZeros()) <= 24;
+  return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
 }
 
 static bool isI24(SDValue Op, SelectionDAG &DAG) {
   EVT VT = Op.getValueType();
-
-  // In order for this to be a signed 24-bit value, bit 23, must
-  // be a sign bit.
   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                      // as unsigned 24-bit values.
-    (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
+    AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
 }
 
 static bool simplifyI24(SDNode *Node24, unsigned OpIdx,
@@ -3946,6 +3955,8 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(MUL_LOHI_I24)
   NODE_NAME_CASE(MAD_U24)
   NODE_NAME_CASE(MAD_I24)
+  NODE_NAME_CASE(MAD_I64_I32)
+  NODE_NAME_CASE(MAD_U64_U32)
   NODE_NAME_CASE(TEXTURE_FETCH)
   NODE_NAME_CASE(EXPORT)
   NODE_NAME_CASE(EXPORT_DONE)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index cdb1518..dd3cc0a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -36,6 +36,8 @@ private:
 
 public:
   static bool isOrEquivalentToAdd(SelectionDAG &DAG, SDValue Op);
+  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
+  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
 
 protected:
   const AMDGPUSubtarget *Subtarget;
@@ -379,6 +381,8 @@ enum NodeType : unsigned {
   MULHI_I24,
   MAD_U24,
   MAD_I24,
+  MAD_U64_U32,
+  MAD_I64_I32,
   MUL_LOHI_I24,
   MUL_LOHI_U24,
   TEXTURE_FETCH,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 56a5fa6..6ee529c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -462,6 +462,10 @@ public:
     return isAmdHsaOS() || isMesaKernel(MF);
   }
 
+  bool hasMad64_32() const {
+    return getGeneration() >= SEA_ISLANDS;
+  }
+
   bool hasFminFmaxLegacy() const {
     return getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS;
   }
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 70e21a2..d1120f5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5962,18 +5962,57 @@ unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
   return 0;
 }
 
+static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
+                           EVT VT,
+                           SDValue N0, SDValue N1, SDValue N2,
+                           bool Signed) {
+  unsigned MadOpc = Signed ? AMDGPUISD::MAD_I64_I32 : AMDGPUISD::MAD_U64_U32;
+  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i1);
+  SDValue Mad = DAG.getNode(MadOpc, SL, VTs, N0, N1, N2);
+  return DAG.getNode(ISD::TRUNCATE, SL, VT, Mad);
+}
+
 SDValue SITargetLowering::performAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
   EVT VT = N->getValueType(0);
-
-  if (VT != MVT::i32)
-    return SDValue();
-
   SDLoc SL(N);
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
 
+  if ((LHS.getOpcode() == ISD::MUL || RHS.getOpcode() == ISD::MUL)
+      && Subtarget->hasMad64_32() &&
+      !VT.isVector() && VT.getScalarSizeInBits() > 32 &&
+      VT.getScalarSizeInBits() <= 64) {
+    if (LHS.getOpcode() != ISD::MUL)
+      std::swap(LHS, RHS);
+
+    SDValue MulLHS = LHS.getOperand(0);
+    SDValue MulRHS = LHS.getOperand(1);
+    SDValue AddRHS = RHS;
+
+    // TODO: Maybe restrict if SGPR inputs.
+    if (numBitsUnsigned(MulLHS, DAG) <= 32 &&
+        numBitsUnsigned(MulRHS, DAG) <= 32) {
+      MulLHS = DAG.getZExtOrTrunc(MulLHS, SL, MVT::i32);
+      MulRHS = DAG.getZExtOrTrunc(MulRHS, SL, MVT::i32);
+      AddRHS = DAG.getZExtOrTrunc(AddRHS, SL, MVT::i64);
+      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, false);
+    }
+
+    if (numBitsSigned(MulLHS, DAG) < 32 && numBitsSigned(MulRHS, DAG) < 32) {
+      MulLHS = DAG.getSExtOrTrunc(MulLHS, SL, MVT::i32);
+      MulRHS = DAG.getSExtOrTrunc(MulRHS, SL, MVT::i32);
+      AddRHS = DAG.getSExtOrTrunc(AddRHS, SL, MVT::i64);
+      return getMad64_32(DAG, SL, VT, MulLHS, MulRHS, AddRHS, true);
+    }
+
+    return SDValue();
+  }
+
+  if (VT != MVT::i32)
+    return SDValue();
+
   // add x, zext (setcc) => addcarry x, 0, setcc
   // add x, sext (setcc) => subcarry x, 0, setcc
   unsigned Opc = LHS.getOpcode();
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
new file mode 100644
index 0000000..b4d9d92
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -0,0 +1,168 @@
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,CI %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops:
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI: v_mul_lo_i32
+; SI: v_mul_hi_i32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_i64_i32_sextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+  %sext0 = sext i32 %arg0 to i64
+  %sext1 = sext i32 %arg1 to i64
+  %mul = mul i64 %sext0, %sext1
+  %mad = add i64 %mul, %arg2
+  ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_commute:
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_i32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_i64_i32_sextops_commute(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+  %sext0 = sext i32 %arg0 to i64
+  %sext1 = sext i32 %arg1 to i64
+  %mul = mul i64 %sext0, %sext1
+  %mad = add i64 %arg2, %mul
+  ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_zextops:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_u64_u32_zextops(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+  %sext0 = zext i32 %arg0 to i64
+  %sext1 = zext i32 %arg1 to i64
+  %mul = mul i64 %sext0, %sext1
+  %mad = add i64 %mul, %arg2
+  ret i64 %mad
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_zextops_commute:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-DAG: v_mul_lo_i32
+; SI-DAG: v_mul_hi_u32
+; SI: v_add_i32
+; SI: v_addc_u32
+define i64 @mad_u64_u32_zextops_commute(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
+  %sext0 = zext i32 %arg0 to i64
+  %sext1 = zext i32 %arg1 to i64
+  %mul = mul i64 %sext0, %sext1
+  %mad = add i64 %arg2, %mul
+  ret i64 %mad
+}
+
+
+
+
+
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_i32_i128:
+; CI: v_mad_u64_u32
+; CI: v_mad_u64_u32
+; CI: v_mad_u64_u32
+; CI: v_mad_i64_i32
+
+; SI-NOT: v_mad_
+define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
+  %sext0 = sext i32 %arg0 to i128
+  %sext1 = sext i32 %arg1 to i128
+  %mul = mul i128 %sext0, %sext1
+  %mad = add i128 %mul, %arg2
+  ret i128 %mad
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_i32_i63:
+; CI: v_lshl_b64
+; CI: v_ashr
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+
+; SI-NOT: v_mad_u64_u32
+define i63 @mad_i64_i32_sextops_i32_i63(i32 %arg0, i32 %arg1, i63 %arg2) #0 {
+  %sext0 = sext i32 %arg0 to i63
+  %sext1 = sext i32 %arg1 to i63
+  %mul = mul i63 %sext0, %sext1
+  %mad = add i63 %mul, %arg2
+  ret i63 %mad
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_sextops_i31_i63:
+; CI: v_lshl_b64
+; CI: v_ashr_i64
+; CI: v_bfe_i32 v1, v1, 0, 31
+; CI: v_bfe_i32 v0, v0, 0, 31
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v1, v[2:3]
+define i63 @mad_i64_i32_sextops_i31_i63(i31 %arg0, i31 %arg1, i63 %arg2) #0 {
+  %sext0 = sext i31 %arg0 to i63
+  %sext1 = sext i31 %arg1 to i63
+  %mul = mul i63 %sext0, %sext1
+  %mad = add i63 %mul, %arg2
+  ret i63 %mad
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_bitops:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v0, v2, v[4:5]
+define i64 @mad_u64_u32_bitops(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+  %trunc.lhs = and i64 %arg0, 4294967295
+  %trunc.rhs = and i64 %arg1, 4294967295
+  %mul = mul i64 %trunc.lhs, %trunc.rhs
+  %add = add i64 %mul, %arg2
+  ret i64 %add
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_bitops_lhs_mask_small:
+; GCN-NOT: v_mad_
+define i64 @mad_u64_u32_bitops_lhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+  %trunc.lhs = and i64 %arg0, 8589934591
+  %trunc.rhs = and i64 %arg1, 4294967295
+  %mul = mul i64 %trunc.lhs, %trunc.rhs
+  %add = add i64 %mul, %arg2
+  ret i64 %add
+}
+
+; GCN-LABEL: {{^}}mad_u64_u32_bitops_rhs_mask_small:
+; GCN-NOT: v_mad_
+define i64 @mad_u64_u32_bitops_rhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+  %trunc.lhs = and i64 %arg0, 4294967295
+  %trunc.rhs = and i64 %arg1, 8589934591
+  %mul = mul i64 %trunc.lhs, %trunc.rhs
+  %add = add i64 %mul, %arg2
+  ret i64 %add
+}
+
+; GCN-LABEL: {{^}}mad_i64_i32_bitops:
+; CI: v_mad_i64_i32 v[0:1], s[6:7], v0, v2, v[4:5]
+; SI-NOT: v_mad_
+define i64 @mad_i64_i32_bitops(i64 %arg0, i64 %arg1, i64 %arg2) #0 {
+  %shl.lhs = shl i64 %arg0, 32
+  %trunc.lhs = ashr i64 %shl.lhs, 32
+  %shl.rhs = shl i64 %arg1, 32
+  %trunc.rhs = ashr i64 %shl.rhs, 32
+  %mul = mul i64 %trunc.lhs, %trunc.rhs
+  %add = add i64 %mul, %arg2
+  ret i64 %add
+}
+
+; Example from bug report
+; GCN-LABEL: {{^}}mad_i64_i32_unpack_i64ops:
+; CI: v_mad_u64_u32 v[0:1], s[6:7], v1, v0, v[0:1]
+; SI-NOT: v_mad_u64_u32
+define i64 @mad_i64_i32_unpack_i64ops(i64 %arg0) #0 {
+  %tmp4 = lshr i64 %arg0, 32
+  %tmp5 = and i64 %arg0, 4294967295
+  %mul = mul nuw i64 %tmp4, %tmp5
+  %mad = add i64 %mul, %arg0
+  ret i64 %mad
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone speculatable }
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index a029078..8839fcd 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -1,6 +1,6 @@
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG %s -check-prefix=FUNC
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
 
 ; mul24 and mad24 are affected
 
@@ -8,8 +8,8 @@
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
@@ -26,10 +26,10 @@ define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
-; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
 define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
@@ -41,10 +41,10 @@ define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> a
 }
 
 ; FUNC-LABEL: {{^}}s_trunc_i64_mul_to_i32:
-; SI: s_load_dword
-; SI: s_load_dword
-; SI: s_mul_i32
-; SI: buffer_store_dword
+; GCN: s_load_dword
+; GCN: s_load_dword
+; GCN: s_mul_i32
+; GCN: buffer_store_dword
 define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
   %mul = mul i64 %b, %a
   %trunc = trunc i64 %mul to i32
@@ -53,10 +53,10 @@ define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a
 }
 
 ; FUNC-LABEL: {{^}}v_trunc_i64_mul_to_i32:
-; SI: s_load_dword
-; SI: s_load_dword
-; SI: v_mul_lo_i32
-; SI: buffer_store_dword
+; GCN: s_load_dword
+; GCN: s_load_dword
+; GCN: v_mul_lo_i32
+; GCN: buffer_store_dword
 define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
   %a = load i64, i64 addrspace(1)* %aptr, align 8
   %b = load i64, i64 addrspace(1)* %bptr, align 8
@@ -71,8 +71,8 @@ define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 ad
 ; FUNC-LABEL: {{^}}mul64_sext_c:
 ; EG-DAG: MULLO_INT
 ; EG-DAG: MULHI_INT
-; SI-DAG: s_mul_i32
-; SI-DAG: v_mul_hi_i32
+; GCN-DAG: s_mul_i32
+; GCN-DAG: v_mul_hi_i32
 define amdgpu_kernel void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
 entry:
   %0 = sext i32 %in to i64
@@ -84,9 +84,9 @@ entry:
 ; FUNC-LABEL: {{^}}v_mul64_sext_c:
 ; EG-DAG: MULLO_INT
 ; EG-DAG: MULHI_INT
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_hi_i32
-; SI: s_endpgm
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_hi_i32
+; GCN: s_endpgm
 define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %val = load i32, i32 addrspace(1)* %in, align 4
   %ext = sext i32 %val to i64
@@ -96,9 +96,9 @@ define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(
 }
 
 ; FUNC-LABEL: {{^}}v_mul64_sext_inline_imm:
-; SI-DAG: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
-; SI-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
-; SI: s_endpgm
+; GCN-DAG: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
+; GCN-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
+; GCN: s_endpgm
 define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %val = load i32, i32 addrspace(1)* %in, align 4
   %ext = sext i32 %val to i64
@@ -108,12 +108,12 @@ define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 a
 }
 
 ; FUNC-LABEL: {{^}}s_mul_i32:
-; SI: s_load_dword [[SRC0:s[0-9]+]],
-; SI: s_load_dword [[SRC1:s[0-9]+]],
-; SI: s_mul_i32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
-; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
-; SI: buffer_store_dword [[VRESULT]],
-; SI: s_endpgm
+; GCN: s_load_dword [[SRC0:s[0-9]+]],
+; GCN: s_load_dword [[SRC1:s[0-9]+]],
+; GCN: s_mul_i32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
+; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
+; GCN: buffer_store_dword [[VRESULT]],
+; GCN: s_endpgm
 define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
   %mul = mul i32 %a, %b
   store i32 %mul, i32 addrspace(1)* %out, align 4
@@ -121,7 +121,7 @@ define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nou
 }
 
 ; FUNC-LABEL: {{^}}v_mul_i32:
-; SI: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
   %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
   %a = load i32, i32 addrspace(1)* %in
@@ -146,7 +146,7 @@ define amdgpu_kernel void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nou
 }
 
 ; FUNC-LABEL: {{^}}v_mul_i64:
-; SI: v_mul_lo_i32
+; GCN: v_mul_lo_i32
 define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
   %a = load i64, i64 addrspace(1)* %aptr, align 8
   %b = load i64, i64 addrspace(1)* %bptr, align 8
@@ -156,7 +156,7 @@ define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
 }
 
 ; FUNC-LABEL: {{^}}mul32_in_branch:
-; SI: s_mul_i32
+; GCN: s_mul_i32
 define amdgpu_kernel void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
 entry:
   %0 = icmp eq i32 %a, 0
@@ -177,9 +177,9 @@ endif:
 }
 
 ; FUNC-LABEL: {{^}}mul64_in_branch:
-; SI-DAG: s_mul_i32
-; SI-DAG: v_mul_hi_u32
-; SI: s_endpgm
+; GCN-DAG: s_mul_i32
+; GCN-DAG: v_mul_hi_u32
+; GCN: s_endpgm
 define amdgpu_kernel void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
 entry:
   %0 = icmp eq i64 %a, 0
@@ -201,29 +201,41 @@ endif:
 
 ; FIXME: Load dwordx4
 ; FUNC-LABEL: {{^}}s_mul_i128:
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
+; GCN: s_load_dwordx2
 ; SI: v_mul_hi_u32
 ; SI: v_mul_hi_u32
 ; SI: s_mul_i32
 ; SI: v_mul_hi_u32
 ; SI: s_mul_i32
+
 
 ; SI-DAG: s_mul_i32
 ; SI-DAG: v_mul_hi_u32
 ; SI-DAG: v_mul_hi_u32
 ; SI-DAG: s_mul_i32
 ; SI-DAG: s_mul_i32
 ; SI-DAG: v_mul_hi_u32
+
 
 ; SI: s_mul_i32
 ; SI: s_mul_i32
 ; SI: s_mul_i32
 ; SI: s_mul_i32
 ; SI: s_mul_i32
-; SI: buffer_store_dwordx4
+
+; VI: s_mul_i32
+; VI: s_mul_i32
+; VI: v_mul_hi_u32
+; VI: v_mul_hi_u32
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+
+
+; GCN: buffer_store_dwordx4
 define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b) nounwind #0 {
   %mul = mul i128 %a, %b
   store i128 %mul, i128 addrspace(1)* %out
@@ -231,18 +243,19 @@ define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b)
 }
 
 ; FUNC-LABEL: {{^}}v_mul_i128:
-; SI: {{buffer|flat}}_load_dwordx4
-; SI: {{buffer|flat}}_load_dwordx4
+; GCN: {{buffer|flat}}_load_dwordx4
+; GCN: {{buffer|flat}}_load_dwordx4
+
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_hi_u32
+; GCN-DAG: v_mul_lo_i32
+; GCN-DAG: v_mul_lo_i32
+; GCN: v_add_i32_e32
 
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_hi_u32
-; SI-DAG: v_mul_lo_i32
-; SI-DAG: v_mul_lo_i32
-; SI: v_add_i32_e32
 ; SI-DAG: v_mul_hi_u32
 ; SI-DAG: v_mul_lo_i32
 ; SI-DAG: v_mul_hi_u32
@@ -252,7 +265,11 @@ define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b)
 ; SI-DAG: v_mul_lo_i32
 ; SI-DAG: v_mul_lo_i32
 
-; SI: {{buffer|flat}}_store_dwordx4
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+; VI: v_mad_u64_u32
+
+; GCN: {{buffer|flat}}_store_dwordx4
 define amdgpu_kernel void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {
   %tid = call i32 @llvm.r600.read.tidig.x()
   %gep.a = getelementptr inbounds i128, i128 addrspace(1)* %aptr, i32 %tid
-- 
2.7.4
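
For reference, the operand-width tests in performAddCombine above amount to this: an i64 multiply operand can feed the 32-bit multiplier of v_mad_u64_u32 when it is known to need at most 32 unsigned bits (numBitsUnsigned(Op, DAG) <= 32), and v_mad_i64_i32 when it needs at most 31 value bits plus a sign bit (numBitsSigned(Op, DAG) < 32), since the instruction sign-extends its 32-bit sources. The small standalone C++ sketch below mirrors that arithmetic on plain integers; it is illustrative only and not part of the patch, and its helper functions are local stand-ins whose names merely echo the ones added above, not the LLVM APIs.

// mad64_32_widths.cpp -- illustrative sketch only; not part of the change.
// Shows why the combine uses `<= 32` for the unsigned case but `< 32` for
// the signed case.
#include <cassert>
#include <cstdint>

// Bits needed to represent V as an unsigned value
// (stand-in for 64 - known leading zero bits).
static unsigned numBitsUnsigned(uint64_t V) {
  unsigned Bits = 0;
  for (; V != 0; V >>= 1)
    ++Bits;
  return Bits;
}

// Non-sign bits needed to represent V as a signed value
// (stand-in for 64 - number of known sign bits).
static unsigned numBitsSigned(int64_t V) {
  uint64_t X = static_cast<uint64_t>(V);
  unsigned SignBits = 1; // bit 63 itself
  for (int I = 62; I >= 0 && ((X >> I) & 1) == (X >> 63); --I)
    ++SignBits;
  return 64 - SignBits;
}

int main() {
  // zext i32 -> i64 operand: at most 32 unsigned bits, v_mad_u64_u32 applies.
  assert(numBitsUnsigned(0xFFFFFFFFULL) <= 32);
  // sext i32 -> i64 operand: at most 31 non-sign bits, v_mad_i64_i32 applies.
  assert(numBitsSigned(-2147483648LL) < 32);
  // A 33-bit mask (as in mad_u64_u32_bitops_lhs_mask_small) fails both tests.
  assert(numBitsUnsigned(0x1FFFFFFFFULL) > 32);
  return 0;
}

On targets without the instruction (the SI runs in the tests above), the same IR still goes through the existing mul/mulhi/add-with-carry expansion, which is why the tests keep separate SI and CI/VI check prefixes.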