From 38e2c01221a9751c0b797417747200d2e9513b9f Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 13 Jan 2020 16:30:09 +0000
Subject: [PATCH] [SelectionDAG] ComputeNumSignBits add getValidMinimumShiftAmountConstant() ISD::SRA support

Allows us to handle more non-uniform SRA sign bits cases
---
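Reasoning behind the change, as a rough standalone C++ sketch: an arithmetic
shift right by C copies the sign bit into the top C bits, so for a vector SRA
with non-uniform constant per-lane amounts the smallest lane amount is still a
safe lower bound on the extra sign bits of every lane. sraNumSignBits below is
a hypothetical helper name used only for illustration, not an LLVM API.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Rough model of the new ISD::SRA path: when the per-lane shift amounts
    // differ, fall back to the minimum amount across the demanded lanes.
    unsigned sraNumSignBits(unsigned OpSignBits,
                            const std::vector<uint64_t> &LaneShAmts,
                            unsigned VTBits) {
      // Each lane gains at least MinShAmt sign bits, clamped to the width.
      uint64_t MinShAmt =
          *std::min_element(LaneShAmts.begin(), LaneShAmts.end());
      return static_cast<unsigned>(
          std::min<uint64_t>(OpSignBits + MinShAmt, VTBits));
    }

    int main() {
      // The test's lane amounts: every i64 lane keeps at least 1 + 33 sign
      // bits, so each shifted value is representable in its low 32 bits.
      assert(sraNumSignBits(1, {33, 34, 35, 36}, 64) == 34);
    }

With at least 33 sign bits per i64 lane, the sitofp in the test below can be
lowered through vshufps + vcvtdq2pd on the low 32-bit halves instead of
per-element cvtsi2sd conversions, which is the codegen change shown in the
updated CHECK lines.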
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp |  5 +-
 llvm/test/CodeGen/X86/known-signbits-vector.ll | 68 ++++++--------------
 2 files changed, 18 insertions(+), 55 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 03efc51..54899ab 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3608,9 +3608,12 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   }
   case ISD::SRA:
     Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
-    // SRA X, C -> adds C sign bits.
+    // SRA X, C -> adds C sign bits.
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts))
       Tmp = std::min(Tmp + ShAmt->getZExtValue(), VTBits);
+    else if (const APInt *ShAmt =
+                 getValidMinimumShiftAmountConstant(Op, DemandedElts))
+      Tmp = std::min(Tmp + ShAmt->getZExtValue(), VTBits);
     return Tmp;
   case ISD::SHL:
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index 97cb432..a3cf754 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -58,10 +58,6 @@ define <4 x float> @signbits_sext_v4i64_sitofp_v4f32(i8 signext %a0, i16 signext
 define <4 x double> @signbits_ashr_sitofp(<4 x i64> %a0) nounwind {
 ; X86-LABEL: signbits_ashr_sitofp:
 ; X86:       # %bb.0:
-; X86-NEXT:    pushl %ebp
-; X86-NEXT:    movl %esp, %ebp
-; X86-NEXT:    andl $-8, %esp
-; X86-NEXT:    subl $64, %esp
 ; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; X86-NEXT:    vpsrlq $36, %xmm1, %xmm2
 ; X86-NEXT:    vpsrlq $35, %xmm1, %xmm1
@@ -75,54 +71,27 @@ define <4 x double> @signbits_ashr_sitofp(<4 x i64> %a0) nounwind {
 ; X86-NEXT:    vmovdqa {{.*#+}} xmm2 = [1073741824,0,536870912,0]
 ; X86-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; X86-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; X86-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X86-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    vmovq %xmm1, {{[0-9]+}}(%esp)
-; X86-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; X86-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
-; X86-NEXT:    fildll {{[0-9]+}}(%esp)
-; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
-; X86-NEXT:    fildll {{[0-9]+}}(%esp)
-; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
-; X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT:    vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
-; X86-NEXT:    fildll {{[0-9]+}}(%esp)
-; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
-; X86-NEXT:    fildll {{[0-9]+}}(%esp)
-; X86-NEXT:    fstpl (%esp)
-; X86-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X86-NEXT:    vmovhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
-; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X86-NEXT:    movl %ebp, %esp
-; X86-NEXT:    popl %ebp
+; X86-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X86-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; X86-NEXT:    retl
 ;
 ; X64-AVX1-LABEL: signbits_ashr_sitofp:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpsrlq $34, %xmm0, %xmm1
-; X64-AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1073741824,536870912]
+; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-AVX1-NEXT:    vpsrlq $36, %xmm1, %xmm2
+; X64-AVX1-NEXT:    vpsrlq $35, %xmm1, %xmm1
+; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [268435456,134217728]
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm1, %xmm1
 ; X64-AVX1-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
-; X64-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; X64-AVX1-NEXT:    vpsrlq $36, %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpsrlq $35, %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpsrlq $34, %xmm0, %xmm2
+; X64-AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [268435456,134217728]
+; X64-AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1073741824,536870912]
 ; X64-AVX1-NEXT:    vpxor %xmm2, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT:    vcvtdq2pd %xmm0, %xmm2
-; X64-AVX1-NEXT:    vpextrq $1, %xmm0, %rax
-; X64-AVX1-NEXT:    vcvtsi2sd %eax, %xmm3, %xmm0
-; X64-AVX1-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
-; X64-AVX1-NEXT:    vpextrq $1, %xmm1, %rax
-; X64-AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
-; X64-AVX1-NEXT:    vmovq %xmm1, %rax
-; X64-AVX1-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
-; X64-AVX1-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X64-AVX1-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-AVX1-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; X64-AVX1-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: signbits_ashr_sitofp:
@@ -132,17 +101,8 @@ define <4 x double> @signbits_ashr_sitofp(<4 x i64> %a0) nounwind {
 ; X64-AVX2-NEXT:    vpxor %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT:    vpextrq $1, %xmm1, %rax
-; X64-AVX2-NEXT:    vcvtsi2sd %rax, %xmm2, %xmm2
-; X64-AVX2-NEXT:    vmovq %xmm1, %rax
-; X64-AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm1
-; X64-AVX2-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-AVX2-NEXT:    vpextrq $1, %xmm0, %rax
-; X64-AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm2
-; X64-AVX2-NEXT:    vmovq %xmm0, %rax
-; X64-AVX2-NEXT:    vcvtsi2sd %rax, %xmm3, %xmm0
-; X64-AVX2-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; X64-AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X64-AVX2-NEXT:    vcvtdq2pd %xmm0, %ymm0
 ; X64-AVX2-NEXT:    retq
   %1 = ashr <4 x i64> %a0, <i64 33, i64 34, i64 35, i64 36>
   %2 = sitofp <4 x i64> %1 to <4 x double>
-- 
2.7.4