From: Simon Pilgrim
Date: Mon, 30 Oct 2017 19:31:08 +0000 (+0000)
Subject: [SelectionDAG] Add VSELECT demanded elts support to computeKnownBits
X-Git-Tag: llvmorg-6.0.0-rc1~4565
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=017f896adb9d41702d0727e7e008917a8a8d5bc3;p=platform%2Fupstream%2Fllvm.git

[SelectionDAG] Add VSELECT demanded elts support to computeKnownBits

llvm-svn: 316947
---

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index a7b19ee..16d157a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2417,22 +2417,22 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
   }
   case ISD::SELECT:
   case ISD::VSELECT:
-    computeKnownBits(Op.getOperand(2), Known, Depth+1);
+    computeKnownBits(Op.getOperand(2), Known, DemandedElts, Depth+1);
     // If we don't know any bits, early out.
     if (Known.isUnknown())
       break;
-    computeKnownBits(Op.getOperand(1), Known2, Depth+1);
+    computeKnownBits(Op.getOperand(1), Known2, DemandedElts, Depth+1);
 
     // Only known if known in both the LHS and RHS.
     Known.One &= Known2.One;
     Known.Zero &= Known2.Zero;
     break;
   case ISD::SELECT_CC:
-    computeKnownBits(Op.getOperand(3), Known, Depth+1);
+    computeKnownBits(Op.getOperand(3), Known, DemandedElts, Depth+1);
     // If we don't know any bits, early out.
     if (Known.isUnknown())
       break;
-    computeKnownBits(Op.getOperand(2), Known2, Depth+1);
+    computeKnownBits(Op.getOperand(2), Known2, DemandedElts, Depth+1);
 
     // Only known if known in both the LHS and RHS.
     Known.One &= Known2.One;
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 67c0b6a..e9b2d67 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -618,11 +618,7 @@ define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32>
 ; X32-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT:    vaddps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    movl %ebp, %esp
 ; X32-NEXT:    popl %ebp
 ; X32-NEXT:    retl
@@ -634,11 +630,7 @@ define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32>
 ; X64-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a2,
   %2 = and <4 x i32> %a3,
@@ -664,11 +656,7 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
 ; X32-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X32-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT:    vaddps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    movl %ebp, %esp
 ; X32-NEXT:    popl %ebp
 ; X32-NEXT:    retl
@@ -682,11 +670,7 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
 ; X64-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
-; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT:    vpsrld $16, %xmm0, %xmm0
-; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = lshr <4 x i32> %a2,
   %2 = and <4 x i32> %a3,
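
The sketch below is a minimal standalone model (not the commit's code) of why threading DemandedElts through the SELECT/VSELECT case matters: each lane of the result is known only where both select arms agree, and before this change a fully unknown but undemanded lane could wipe out knowledge about the lanes a user such as a shuffle actually reads. All names here (KnownBits16, mergeSelect, selectKnownBits) are hypothetical stand-ins for llvm::KnownBits and the computeKnownBits logic.

#include <cstdint>
#include <cstdio>
#include <vector>

// Minimal stand-in for llvm::KnownBits on one 16-bit lane: a bit is
// known-zero, known-one, or unknown (set in neither mask).
struct KnownBits16 {
  uint16_t Zero = 0; // bits known to be 0
  uint16_t One = 0;  // bits known to be 1
};

// For a select, a bit is known only if it has the same known value in
// BOTH possible sources -- the intersection the patch computes with
// Known.One &= Known2.One; Known.Zero &= Known2.Zero;
KnownBits16 mergeSelect(KnownBits16 T, KnownBits16 F) {
  return {static_cast<uint16_t>(T.Zero & F.Zero),
          static_cast<uint16_t>(T.One & F.One)};
}

// Combine known bits across only the demanded lanes of a vector select.
// Without a DemandedElts mask, every lane participates, so one fully
// unknown lane clears all knowledge.
KnownBits16 selectKnownBits(const std::vector<KnownBits16> &TVal,
                            const std::vector<KnownBits16> &FVal,
                            unsigned DemandedElts) {
  // Start from the identity for intersection (everything "known"),
  // then AND in each demanded lane.
  KnownBits16 Acc{0xFFFF, 0xFFFF};
  for (unsigned i = 0; i < TVal.size(); ++i) {
    if (!(DemandedElts & (1u << i)))
      continue; // undemanded lane: ignore it entirely
    KnownBits16 Lane = mergeSelect(TVal[i], FVal[i]);
    Acc.Zero &= Lane.Zero;
    Acc.One &= Lane.One;
  }
  return Acc;
}

int main() {
  // Lane 1 is completely unknown in both arms, but only lane 0 is
  // demanded, so lane 0's knowledge (top byte known zero) survives.
  std::vector<KnownBits16> T = {{0xFF00, 0x0000}, {0, 0}};
  std::vector<KnownBits16> F = {{0xFF00, 0x0000}, {0, 0}};
  KnownBits16 K = selectKnownBits(T, F, /*DemandedElts=*/0x1);
  std::printf("Zero=%04x One=%04x\n", K.Zero, K.One); // Zero=ff00 One=0000
  return 0;
}

With DemandedElts = 0x3 (both lanes) the model would instead report nothing known. That mirrors the test change above: once the demanded lanes of the select are known to have their upper bits zero, the expensive unsigned uitofp expansion (vpblendw/vpsrld/vaddps) can fold to a single signed vcvtdq2ps.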