From 7613a7b56472bd5864ec6ef1012f714b49a49392 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sun, 29 Oct 2017 18:19:37 +0000
Subject: [PATCH] [SelectionDAG] Add SRA/SHL demanded elts support to
 ComputeNumSignBits

Introduce an isConstOrDemandedConstSplat helper function that can
recognise a constant splat build vector for at least the demanded elts
we care about.

llvm-svn: 316866
---
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 32 ++++++++++++++++++++--
 llvm/test/CodeGen/X86/known-signbits-vector.ll | 38 ++++++++++----------------
 2 files changed, 43 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 377f84a..bcf2128 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2056,6 +2056,30 @@ bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
   return Mask.isSubsetOf(Known.Zero);
 }
 
+/// Helper function that checks to see if a node is a constant or a
+/// build vector of splat constants at least within the demanded elts.
+static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N,
+                                                   const APInt &DemandedElts) {
+  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
+    return CN;
+  if (N.getOpcode() != ISD::BUILD_VECTOR)
+    return nullptr;
+  EVT VT = N.getValueType();
+  ConstantSDNode *Cst = nullptr;
+  unsigned NumElts = VT.getVectorNumElements();
+  assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size");
+  for (unsigned i = 0; i != NumElts; ++i) {
+    if (!DemandedElts[i])
+      continue;
+    ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i));
+    if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) ||
+        C->getValueType(0) != VT.getScalarType())
+      return nullptr;
+    Cst = C;
+  }
+  return Cst;
+}
+
 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
 /// is less than the element bit-width of the shift node, return it.
 static const APInt *getValidShiftAmountConstant(SDValue V) {
@@ -3121,16 +3145,18 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   case ISD::SRA:
     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
     // SRA X, C -> adds C sign bits.
-    if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
+    if (ConstantSDNode *C =
+            isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
       APInt ShiftVal = C->getAPIntValue();
       ShiftVal += Tmp;
       Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
     }
     return Tmp;
   case ISD::SHL:
-    if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
+    if (ConstantSDNode *C =
+            isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
       // shl destroys sign bits.
-      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+      Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
       if (C->getAPIntValue().uge(VTBits) ||      // Bad shift.
           C->getAPIntValue().uge(Tmp)) break;    // Shifted all sign bits out.
       return Tmp - C->getZExtValue();

diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index 24002f4..0b78017 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -100,10 +100,7 @@ define float @signbits_ashr_extract_sitofp_0(<2 x i64> %a0) nounwind {
 define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
 ; X32-LABEL: signbits_ashr_extract_sitofp_1:
 ; X32:       # BB#0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $16, %esp
+; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $63, %xmm1, %xmm2
 ; X32-NEXT:    vpsrlq $32, %xmm1, %xmm1
@@ -113,12 +110,11 @@ define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
 ; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
 ; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT:    fildll {{[0-9]+}}(%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    flds {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
+; X32-NEXT:    vmovd %xmm0, %eax
+; X32-NEXT:    vcvtsi2ssl %eax, %xmm3, %xmm0
+; X32-NEXT:    vmovss %xmm0, (%esp)
+; X32-NEXT:    flds (%esp)
+; X32-NEXT:    popl %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_extract_sitofp_1:
@@ -130,7 +126,7 @@ define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
 ; X64-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; X64-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 32, i64 63>
   %2 = extractelement <2 x i64> %1, i32 0
@@ -141,10 +137,7 @@ define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
 define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
 ; X32-LABEL: signbits_ashr_shl_extract_sitofp:
 ; X32:       # BB#0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $16, %esp
+; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $60, %xmm1, %xmm2
 ; X32-NEXT:    vpsrlq $61, %xmm1, %xmm1
@@ -154,15 +147,12 @@ define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
 ; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
 ; X32-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vpsllq $16, %xmm0, %xmm1
 ; X32-NEXT:    vpsllq $20, %xmm0, %xmm0
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT:    fildll {{[0-9]+}}(%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    flds {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
+; X32-NEXT:    vmovd %xmm0, %eax
+; X32-NEXT:    vcvtsi2ssl %eax, %xmm3, %xmm0
+; X32-NEXT:    vmovss %xmm0, (%esp)
+; X32-NEXT:    flds (%esp)
+; X32-NEXT:    popl %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_shl_extract_sitofp:
@@ -175,7 +165,7 @@ define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
 ; X64-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vpsllq $20, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; X64-NEXT:    vcvtsi2ssl %eax, %xmm2, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = shl <2 x i64> %1, <i64 20, i64 16>
-- 
2.7.4
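
Editor's note (not part of the patch): a minimal sketch of the kind of
query this change improves. "DAG" and "Shift" are placeholder names for
a SelectionDAG reference and an ISD::SRA node whose BUILD_VECTOR shift
amount is constant but non-splat, e.g. <i64 32, i64 63> as in the
signbits_ashr_extract_sitofp_1 test above:

  // Demand only element 0 of the 2-element vector.
  APInt DemandedElts = APInt::getOneBitSet(2, 0);
  // Previously the non-splat shift amount defeated isConstOrConstSplat
  // and the query fell back to the generic lower bound; with the patch,
  // the constant for the demanded lane (32) is used, so the SRA case
  // reports at least 33 sign bits.
  unsigned SignBits = DAG.ComputeNumSignBits(Shift, DemandedElts);
  // 33+ sign bits mean the extracted i64 value fits in i32, so the
  // sitofp can be lowered with a 32-bit cvtsi2ss instead of a 64-bit
  // conversion, which is what the updated CHECK lines verify.
  bool FitsInI32 = SignBits > 32;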