From: Nirav Dave
Date: Mon, 7 Aug 2017 14:07:49 +0000 (+0000)
Subject: [DAG] Extend visitSCALAR_TO_VECTOR optimization to truncated vector.
X-Git-Tag: llvmorg-6.0.0-rc1~10708
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3d3bde7682eb73d3d520376a8272abab1cb3c064;p=platform%2Fupstream%2Fllvm.git

[DAG] Extend visitSCALAR_TO_VECTOR optimization to truncated vector.

Relanding after adding a case to insert explicit truncation as necessary.

Allow SCALAR_TO_VECTOR of EXTRACT_VECTOR_ELT to reduce to
EXTRACT_SUBVECTOR of a VECTOR_SHUFFLE when the output vector is smaller
than the input vector. Marginally improves vector shuffle computations.
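For example (an illustrative case; the types and lane index are taken
from the updated AArch64 tests below), the combine now rewrites

    (v1i32 (scalar_to_vector (i32 (extract_vector_elt v4i32:$v, 3))))

into

    (v1i32 (extract_subvector
              (v4i32 (vector_shuffle v4i32:$v, undef, <3,u,u,u>)), 0))

which lets AArch64 select a single lane dup instead of moving the
element through a general-purpose register with umov/fmov.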
Reviewers: efriedma, RKSimon, spatel

Subscribers: javed.absar, llvm-commits

Differential Revision: https://reviews.llvm.org/D35566

llvm-svn: 310256
---

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 353c86f..4d817f1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -15760,23 +15760,46 @@ SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {
   SDValue InVal = N->getOperand(0);
   EVT VT = N->getValueType(0);

   // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern
-  // with a VECTOR_SHUFFLE.
+  // with a VECTOR_SHUFFLE and possible truncate.
   if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
     SDValue InVec = InVal->getOperand(0);
     SDValue EltNo = InVal->getOperand(1);
-
-    // FIXME: We could support implicit truncation if the shuffle can be
-    // scaled to a smaller vector scalar type.
-    ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo);
-    if (C0 && VT == InVec.getValueType() &&
-        VT.getScalarType() == InVal.getValueType()) {
-      SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1);
+    auto InVecT = InVec.getValueType();
+    if (ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo)) {
+      SmallVector<int, 8> NewMask(InVecT.getVectorNumElements(), -1);
       int Elt = C0->getZExtValue();
       NewMask[0] = Elt;
-
-      if (TLI.isShuffleMaskLegal(NewMask, VT))
-        return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT),
-                                    NewMask);
+      SDValue Val;
+      // If the scalar type doesn't match, this is an implicit truncate;
+      // insert an explicit TRUNCATE as long as the target type is legal.
+      if (VT.getScalarType() != InVal.getValueType() &&
+          InVal.getValueType().isScalarInteger() &&
+          isTypeLegal(VT.getScalarType())) {
+        Val =
+            DAG.getNode(ISD::TRUNCATE, SDLoc(InVal), VT.getScalarType(), InVal);
+        return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Val);
+      }
+      if (VT.getScalarType() == InVecT.getScalarType() &&
+          VT.getVectorNumElements() <= InVecT.getVectorNumElements() &&
+          TLI.isShuffleMaskLegal(NewMask, VT)) {
+        Val = DAG.getVectorShuffle(InVecT, SDLoc(N), InVec,
+                                   DAG.getUNDEF(InVecT), NewMask);
+        // If the initial vector is the correct size, this shuffle is a
+        // valid result.
+        if (VT == InVecT)
+          return Val;
+        // If not, we must truncate the vector by extracting a subvector.
+        if (VT.getVectorNumElements() != InVecT.getVectorNumElements()) {
+          MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
+          SDValue ZeroIdx = DAG.getConstant(0, SDLoc(N), IdxTy);
+          EVT SubVT =
+              EVT::getVectorVT(*DAG.getContext(), InVecT.getVectorElementType(),
+                               VT.getVectorNumElements());
+          Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), SubVT, Val,
+                            ZeroIdx);
+          return Val;
+        }
+      }
     }
   }
diff --git a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
index 16834b7..2585676 100644
--- a/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -188,7 +188,7 @@ define <2 x float> @ins4f2(<4 x float> %tmp1, <2 x float> %tmp2) {

 define <1 x double> @ins2f1(<2 x double> %tmp1, <1 x double> %tmp2) {
 ; CHECK-LABEL: ins2f1:
-; CHECK: mov {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+; CHECK: dup {{v[0-9]+}}.2d, {{v[0-9]+}}.d[1]
   %tmp3 = extractelement <2 x double> %tmp1, i32 1
   %tmp4 = insertelement <1 x double> %tmp2, double %tmp3, i32 0
   ret <1 x double> %tmp4
diff --git a/llvm/test/CodeGen/AArch64/neon-scalar-copy.ll b/llvm/test/CodeGen/AArch64/neon-scalar-copy.ll
index 3f77060..2384e48 100644
--- a/llvm/test/CodeGen/AArch64/neon-scalar-copy.ll
+++ b/llvm/test/CodeGen/AArch64/neon-scalar-copy.ll
@@ -79,8 +79,7 @@ define half @test_dup_hv8H_0(<8 x half> %v) #0 {

 define <1 x i8> @test_vector_dup_bv16B(<16 x i8> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_bv16B:
-  ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.b[14]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.16b, v0.b[14]
   ; CHECK-NEXT: ret
   %shuffle.i = shufflevector <16 x i8> %v1, <16 x i8> undef, <1 x i32> <i32 14>
   ret <1 x i8> %shuffle.i
@@ -96,8 +95,7 @@ define <1 x i8> @test_vector_dup_bv8B(<8 x i8> %v1) #0 {

 define <1 x i16> @test_vector_dup_hv8H(<8 x i16> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_hv8H:
-  ; CHECK-NEXT: umov [[W:w[0-9]+]], v0.h[7]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.8h, v0.h[7]
   ; CHECK-NEXT: ret
   %shuffle.i = shufflevector <8 x i16> %v1, <8 x i16> undef, <1 x i32> <i32 7>
   ret <1 x i16> %shuffle.i
@@ -113,8 +111,7 @@ define <1 x i16> @test_vector_dup_hv4H(<4 x i16> %v1) #0 {

 define <1 x i32> @test_vector_dup_sv4S(<4 x i32> %v1) #0 {
   ; CHECK-LABEL: test_vector_dup_sv4S:
-  ; CHECK-NEXT: mov [[W:w[0-9]+]], v0.s[3]
-  ; CHECK-NEXT: fmov s0, [[W]]
+  ; CHECK-NEXT: dup v0.4s, v0.s[3]
   ; CHECK-NEXT: ret
   %shuffle = shufflevector <4 x i32> %v1, <4 x i32> undef, <1 x i32> <i32 3>
   ret <1 x i32> %shuffle
@@ -138,7 +135,7 @@ define <1 x i64> @test_vector_dup_dv2D(<2 x i64> %v1) #0 {

 define <1 x i64> @test_vector_copy_dup_dv2D(<1 x i64> %a, <2 x i64> %c) #0 {
   ; CHECK-LABEL: test_vector_copy_dup_dv2D:
-  ; CHECK-NEXT: {{dup|mov}} {{d[0-9]+}}, {{v[0-9]+}}.d[1]
+  ; CHECK-NEXT: dup v0.2d, v1.d[1]
   ; CHECK-NEXT: ret
   %vget_lane = extractelement <2 x i64> %c, i32 1
   %vset_lane = insertelement <1 x i64> undef, i64 %vget_lane, i32 0