From 5b78c9d58d9043db6a9c4ca489a40486375776d8 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 20 Aug 2018 13:05:48 +0000
Subject: [PATCH] [SelectionDAG] Add partial sign-bit support to
 ComputeNumSignBits for BITCAST nodes

Only adds support to the existing 'large element' scalar/vector to
'small element' vector bitcasts. Handle the case where the sign bit
extends to only part of the small elements.

llvm-svn: 340169
---
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 18 +++++++++++--
 llvm/test/CodeGen/X86/packss.ll                | 36 ++++++++++----------------
 2 files changed, 30 insertions(+), 24 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 7b28a06d..c40fda3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3240,9 +3240,9 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     if (VTBits == SrcBits)
       return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
 
+    bool IsLE = getDataLayout().isLittleEndian();
+
     // Bitcast 'large element' scalar/vector to 'small element' vector.
-    // TODO: Handle cases other than 'sign splat' when we have a use case.
-    // Requires handling of DemandedElts and Endianness.
     if ((SrcBits % VTBits) == 0) {
       assert(Op.getValueType().isVector() && "Expected bitcast to vector");
@@ -3252,9 +3252,23 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
         if (DemandedElts[i])
           SrcDemandedElts.setBit(i / Scale);
 
+      // Fast case - sign splat can be simply split across the small elements.
       Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
       if (Tmp == SrcBits)
         return VTBits;
+
+      // Slow case - determine how far the sign extends into each sub-element.
+      Tmp2 = VTBits;
+      for (unsigned i = 0; i != NumElts; ++i)
+        if (DemandedElts[i]) {
+          unsigned SubOffset = i % Scale;
+          SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
+          SubOffset = SubOffset * VTBits;
+          if (Tmp <= SubOffset)
+            return 1;
+          Tmp2 = std::min(Tmp2, Tmp - SubOffset);
+        }
+      return Tmp2;
     }
     break;
   }
diff --git a/llvm/test/CodeGen/X86/packss.ll b/llvm/test/CodeGen/X86/packss.ll
index 0965947..38b4b1dd 100644
--- a/llvm/test/CodeGen/X86/packss.ll
+++ b/llvm/test/CodeGen/X86/packss.ll
@@ -42,40 +42,33 @@ define <4 x i32> @trunc_ashr_v4i64(<4 x i64> %a) nounwind {
 define <8 x i16> @trunc_ashr_v4i64_bitcast(<4 x i64> %a0) {
 ; SSE-LABEL: trunc_ashr_v4i64_bitcast:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrad $31, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; SSE-NEXT:    psrad $17, %xmm0
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
-; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrad $31, %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
 ; SSE-NEXT:    psrad $17, %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
 ; SSE-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE-NEXT:    pslld $16, %xmm1
-; SSE-NEXT:    psrad $16, %xmm1
-; SSE-NEXT:    pslld $16, %xmm0
-; SSE-NEXT:    psrad $16, %xmm0
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrad $31, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE-NEXT:    psrad $17, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSE-NEXT:    packssdw %xmm1, %xmm0
 ; SSE-NEXT:    ret{{[l|q]}}
 ;
 ; AVX1-LABEL: trunc_ashr_v4i64_bitcast:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
-; AVX1-NEXT:    vpsrad $17, %xmm0, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT:    vpsrad $17, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm2
 ; AVX1-NEXT:    vpsrad $17, %xmm0, %xmm0
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    ret{{[l|q]}}
 ;
@@ -85,9 +78,8 @@ define <8 x i16> @trunc_ashr_v4i64_bitcast(<4 x i64> %a0) {
 ; AVX2-NEXT:    vpsrad $17, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
-; AVX2-NEXT:    vpackssdw %ymm0, %ymm0, %ymm0
-; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    ret{{[l|q]}}
   %1 = ashr <4 x i64> %a0,
-- 
2.7.4
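
Note on the new 'slow case' arithmetic above: it can be modelled in isolation
as a standalone C++ sketch. This is illustrative only, not part of the patch
and not LLVM API; the helper name signBitsOfSubElement and the sample values
(a v2i64 whose elements have 40 known sign bits, bitcast to v4i32 on a
little-endian target) are assumptions made up for this example.

  #include <algorithm>
  #include <cstdio>

  // Given a SrcBits-wide source element known to have Tmp sign bits, return
  // how many sign bits sub-element i keeps after bitcasting to VTBits-wide
  // elements.
  unsigned signBitsOfSubElement(unsigned SrcBits, unsigned VTBits,
                                unsigned Tmp, unsigned i, bool IsLE) {
    unsigned Scale = SrcBits / VTBits; // sub-elements per source element
    unsigned SubOffset = i % Scale;    // index within the source element
    // On little-endian targets the last sub-element holds the top bits.
    SubOffset = IsLE ? (Scale - 1) - SubOffset : SubOffset;
    SubOffset *= VTBits;               // bit distance below the sign bit
    if (Tmp <= SubOffset)              // the sign bits never reach this slice
      return 1;                        // conservative answer
    return std::min(VTBits, Tmp - SubOffset);
  }

  int main() {
    // Prints 8 sign bits for the low i32 of each i64 (40 - 32) and 32 for
    // the high i32.
    for (unsigned i = 0; i != 4; ++i)
      std::printf("elt %u: %u sign bits\n", i,
                  signBitsOfSubElement(64, 32, 40, i, /*IsLE=*/true));
    return 0;
  }

In the patch itself, Tmp2 takes the minimum of this value over all demanded
elements, and any demanded sub-element that the sign bits never reach makes
the whole bitcast fall back to the conservative answer of 1.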