From: Simon Pilgrim
Date: Thu, 10 Nov 2016 15:05:09 +0000 (+0000)
Subject: [SelectionDAG] Add support for vector demandedelts in SRA opcodes
X-Git-Tag: llvmorg-4.0.0-rc1~5032
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ca57e53ded1f0c0d697c99c1d8442c2dce011d0c;p=platform%2Fupstream%2Fllvm.git

[SelectionDAG] Add support for vector demandedelts in SRA opcodes

llvm-svn: 286461
---

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 6d25ead..e272315 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2265,7 +2265,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     break;
   case ISD::SRA:
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
-      computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
+      computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
+                       Depth + 1);
       KnownZero = KnownZero.lshr(*ShAmt);
       KnownOne = KnownOne.lshr(*ShAmt);
       // If we know the value of the sign bit, then we know it is copied across
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 63ac994..0ba3cb1 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -156,18 +156,12 @@ define <4 x i32> @knownbits_mask_shl_shuffle_lshr(<4 x i32> %a0) nounwind {
 define <4 x i32> @knownbits_mask_ashr_shuffle_lshr(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_ashr_shuffle_lshr:
 ; X32:       # BB#0:
-; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vpsrad $15, %xmm0, %xmm0
-; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X32-NEXT:    vpsrld $30, %xmm0, %xmm0
+; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_ashr_shuffle_lshr:
 ; X64:       # BB#0:
-; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpsrad $15, %xmm0, %xmm0
-; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X64-NEXT:    vpsrld $30, %xmm0, %xmm0
+; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0,
   %2 = ashr <4 x i32> %1,
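
Note on the change (editorial, not part of the original patch): before this
commit the ISD::SRA case recursed into computeKnownBits without forwarding
DemandedElts, so any known-zero/known-one bits had to hold across every
vector lane; with the demanded-elements mask forwarded, lanes the caller
never reads no longer pessimize the result. That is what lets the masked
ashr/shuffle/lshr chain in the test above fold to a single vxorps (all
demanded lanes known zero).

The following is a minimal, self-contained sketch of that effect in plain
C++ -- a toy model, not LLVM's APInt/KnownBits API. The 17-bit lane mask and
the demanded lanes {0, 3} are assumptions chosen to mirror the shape of the
(truncated) test IR, not the exact constants:

#include <cstdint>
#include <cstdio>

int main() {
  // Per-lane known-zero masks after `and %a0, Mask`, assuming lanes 0 and 3
  // are masked to their low 17 bits while lanes 1 and 2 stay unmasked.
  uint32_t KnownZero[4] = {~0x1FFFFu, 0u, 0u, ~0x1FFFFu};

  // A later shuffle reads only lanes 0 and 3.
  bool DemandedElts[4] = {true, false, false, true};

  for (bool UseDemanded : {false, true}) {
    // Intersect known-zero bits across lanes: without DemandedElts all four
    // lanes participate (the pre-patch behaviour); with it, only lanes 0/3.
    uint32_t KZ = ~0u;
    for (int i = 0; i < 4; ++i)
      if (!UseDemanded || DemandedElts[i])
        KZ &= KnownZero[i];

    // Model `ashr $15`: known-zero bits move right, and if the sign bit is
    // known zero the arithmetic shift fills the vacated top bits with zeros.
    bool SignKnownZero = (KZ >> 31) != 0;
    KZ >>= 15;
    if (SignKnownZero)
      KZ |= ~(~0u >> 15); // high 15 bits become known zero

    // Model `lshr $30`: only the top two pre-shift bits reach the result.
    printf("%s DemandedElts: unknown result bit mask = 0x%x\n",
           UseDemanded ? "with" : "without", (unsigned)(~KZ >> 30));
  }
  return 0;
}

Run as written, this prints an unknown-bit mask of 0x3 without the demanded
mask and 0x0 with it: only once lane-awareness is threaded through the SRA
recursion does the final lshr-by-30 become provably zero, matching the
vxorps the updated test expects.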