From 601ae238b79cd62bf70703306cff5c606df329b3 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sun, 29 Oct 2017 22:03:37 +0000
Subject: [PATCH] [SelectionDAG] Add SEXT/AND/XOR/OR demanded elts support to
 ComputeNumSignBits

llvm-svn: 316875
---
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 18 +++++++-----
 llvm/test/CodeGen/X86/known-signbits-vector.ll | 38 ++++++++++----------
 2 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index bcf2128..27a02bb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3130,17 +3130,21 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   }
 
   case ISD::SIGN_EXTEND:
-  case ISD::SIGN_EXTEND_VECTOR_INREG:
     Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
-    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
-
+    return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
   case ISD::SIGN_EXTEND_INREG:
     // Max of the input and what this extends.
     Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
     Tmp = VTBits-Tmp+1;
-
-    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+    Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
     return std::max(Tmp, Tmp2);
+  case ISD::SIGN_EXTEND_VECTOR_INREG: {
+    SDValue Src = Op.getOperand(0);
+    EVT SrcVT = Src.getValueType();
+    APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
+    Tmp = VTBits - SrcVT.getScalarSizeInBits();
+    return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
+  }
 
   case ISD::SRA:
     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
@@ -3166,9 +3170,9 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   case ISD::OR:
   case ISD::XOR:    // NOT is handled here.
     // Logical binary ops preserve the number of sign bits at the worst.
-    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
     if (Tmp != 1) {
-      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
+      Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
       FirstAnswer = std::min(Tmp, Tmp2);
       // We computed what we know about the sign bits as our first
       // answer.  Now proceed to the generic code that uses
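Note on the new SIGN_EXTEND_VECTOR_INREG case: the node produces fewer, wider
elements than its source, and result element i is sign-extended from source
element i, so zero-extending the demanded-elements mask to the source's
element count leaves the unread high source elements undemanded. Below is a
minimal standalone C++ sketch of that reasoning, not the LLVM API;
numSignBits32 and the sample lane values are hypothetical stand-ins.

// Standalone model of the demanded-elements mapping for a 4 x i32 source
// feeding a 2 x i64 sign_extend_vector_inreg.
#include <algorithm>
#include <cstdint>
#include <iostream>

// Stand-in for ComputeNumSignBits over 32-bit lanes: the minimum number of
// leading sign bits among the demanded lanes (assumes arithmetic >> of
// negative values, as on all mainstream targets).
static unsigned numSignBits32(const int32_t *Lanes, unsigned NumLanes,
                              uint64_t DemandedMask) {
  unsigned Min = 32;
  for (unsigned I = 0; I != NumLanes; ++I) {
    if (!(DemandedMask & (1ULL << I)))
      continue; // undemanded lanes cannot lower the result
    int32_t V = Lanes[I];
    unsigned Bits = 1; // the sign bit itself
    while (Bits < 32 && ((V >> (31 - Bits)) & 1) == ((V >> 31) & 1))
      ++Bits;
    Min = std::min(Min, Bits);
  }
  return Min;
}

int main() {
  // The 2 x i64 result reads only source lanes 0 and 1. Lane 2 has a single
  // sign bit, but since it is never read it must not pessimize the answer.
  int32_t Src[4] = {7, -2, INT32_MIN, 0};
  uint64_t DemandedResultElts = 0x3; // both i64 result elements demanded
  // DemandedElts.zext(SrcVT.getVectorNumElements()): same low bits, rest 0.
  uint64_t DemandedSrcElts = DemandedResultElts;

  unsigned SrcBits = numSignBits32(Src, 4, DemandedSrcElts);
  unsigned ResBits = SrcBits + (64 - 32); // Tmp = VTBits - src scalar bits
  std::cout << "src sign bits: " << SrcBits      // 29 (from lane 0, value 7)
            << ", result sign bits: " << ResBits // 61
            << "\n";
}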
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index f886379..6ad45d1 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -279,10 +279,7 @@ define <2 x double> @signbits_ashr_concat_ashr_extract_sitofp(<2 x i64> %a0, <4
 define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 x i64> %a1, i32 %a2) nounwind {
 ; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
 ; X32:       # BB#0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $16, %esp
+; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $60, %xmm2, %xmm3
 ; X32-NEXT:    vpsrlq $61, %xmm2, %xmm2
@@ -292,7 +289,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 ; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
 ; X32-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; X32-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; X32-NEXT:    movl 8(%ebp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vpinsrd $0, %eax, %xmm1, %xmm1
 ; X32-NEXT:    sarl $31, %eax
 ; X32-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
@@ -301,12 +298,11 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 ; X32-NEXT:    vpsrlq $20, %xmm1, %xmm1
 ; X32-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; X32-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT:    fildll {{[0-9]+}}(%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    flds {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
+; X32-NEXT:    vmovd %xmm0, %eax
+; X32-NEXT:    vcvtsi2ssl %eax, %xmm4, %xmm0
+; X32-NEXT:    vmovss %xmm0, (%esp)
+; X32-NEXT:    flds (%esp)
+; X32-NEXT:    popl %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp:
@@ -325,7 +321,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 ; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
 ; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; X64-NEXT:    vcvtsi2ssl %eax, %xmm3, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = sext i32 %a2 to i64
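The X64 change above (vcvtsi2ssq %rax -> vcvtsi2ssl %eax) falls out of the
improved analysis: the extracted i64 is an AND of a value ashr'd by 61/60 and
a sign-extended operand, so it carries well over 33 sign bits and its value is
exactly representable in 32 bits. A standalone C++ sketch of that argument
(no LLVM; the sample values are illustrative, not the test's inputs; assumes
two's complement and arithmetic >> of negative integers):

#include <cassert>
#include <cstdint>

int main() {
  int64_t A = INT64_MIN >> 61;         // ashr by 61 leaves >= 61 sign bits
  int64_t B = (int64_t)-1048576 >> 20; // sext'd then ashr'd operand; -1 here
  int64_t V = A & B; // AND keeps at least the smaller sign-bit count

  assert(V == (int64_t)(int32_t)V); // the value fits in an i32, so...
  float Narrow = (float)(int32_t)V; // ...cvtsi2ssl on the low 32 bits
  float Wide = (float)V;            // matches cvtsi2ssq on the full i64
  assert(Narrow == Wide);
  return 0;
}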
@@ -341,10 +337,7 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2
 define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
 ; X32:       # BB#0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $16, %esp
+; X32-NEXT:    pushl %eax
 ; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
 ; X32-NEXT:    vpsrlq $60, %xmm2, %xmm3
 ; X32-NEXT:    vpsrlq $61, %xmm2, %xmm2
@@ -358,12 +351,11 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
 ; X32-NEXT:    vpand %xmm1, %xmm0, %xmm2
 ; X32-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; X32-NEXT:    vpxor %xmm0, %xmm1, %xmm0
-; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT:    fildll {{[0-9]+}}(%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    flds {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %ebp, %esp
-; X32-NEXT:    popl %ebp
+; X32-NEXT:    vmovd %xmm0, %eax
+; X32-NEXT:    vcvtsi2ssl %eax, %xmm4, %xmm0
+; X32-NEXT:    vmovss %xmm0, (%esp)
+; X32-NEXT:    flds (%esp)
+; X32-NEXT:    popl %eax
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: signbits_ashr_sextvecinreg_bitops_extract_sitofp:
@@ -379,7 +371,7 @@ define float @signbits_ashr_sextvecinreg_bitops_extract_sitofp(<2 x i64> %a0, <4
 ; X64-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; X64-NEXT:    vpxor %xmm0, %xmm1, %xmm0
 ; X64-NEXT:    vmovq %xmm0, %rax
-; X64-NEXT:    vcvtsi2ssq %rax, %xmm3, %xmm0
+; X64-NEXT:    vcvtsi2ssl %eax, %xmm3, %xmm0
 ; X64-NEXT:    retq
   %1 = ashr <2 x i64> %a0, <i64 61, i64 60>
   %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
-- 
2.7.4
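For reference, the property the AND/OR/XOR case relies on ("logical binary
ops preserve the number of sign bits at the worst") can be spot-checked in
isolation: if both operands have at least k leading sign bits, each of the
top k result bits is the same function of the two constant operand sign bits,
so the result also has at least k. A throwaway self-contained C++ check, not
LLVM code; signBits64 and the sample values are arbitrary stand-ins:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Number of leading sign bits of a 64-bit value (at least 1).
static unsigned signBits64(int64_t V) {
  unsigned Bits = 1; // the sign bit itself
  while (Bits < 64 && ((V >> (63 - Bits)) & 1) == ((V >> 63) & 1))
    ++Bits;
  return Bits;
}

int main() {
  const int64_t Samples[] = {3, -3, 100, -100, 0, -1, INT64_MIN >> 10};
  for (int64_t X : Samples)
    for (int64_t Y : Samples) {
      unsigned Floor = std::min(signBits64(X), signBits64(Y));
      assert(signBits64(X & Y) >= Floor);
      assert(signBits64(X | Y) >= Floor);
      assert(signBits64(X ^ Y) >= Floor);
    }
  return 0;
}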