From: Sanjay Patel
Date: Mon, 15 Nov 2021 15:47:11 +0000 (-0500)
Subject: [x86] fold vector (X > -1) & Y to shift+andn (2nd try)
X-Git-Tag: upstream/15.0.7~25688
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3d01507c2dec61121a1efcc84d3a2328ffc651cd;p=platform%2Fupstream%2Fllvm.git

[x86] fold vector (X > -1) & Y to shift+andn (2nd try)

The first try at this patch ( bf5748a1af0d ) was reverted ( 5be64d416481 )
because it could crash. The cause of that problem was failing to account for
the optional peek-through-bitcast in the enclosing function.

This version of the patch adds a clause to avoid the fold in case of bitcasts
because it is unlikely to be profitable in that scenario.

A test case based on https://llvm.org/PR52504 was added to make sure we don't
have that problem again.

Original commit message:

and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth-1), Y

This avoids the -1 constant vector in favor of an arithmetic shift instruction
if it exists (the ISA is still not complete after all these years...).

We catch this pattern late in combining by matching PCMPGT, so it should not
interfere with more general folds.

Differential Revision: https://reviews.llvm.org/D113603
---

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index dbd4e50..f6a9bff 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45902,6 +45902,36 @@ static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
   if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
     return SDValue();
 
+  // Try to convert an "is positive" signbit masking operation into arithmetic
+  // shift and "andn". This saves a materialization of a -1 vector constant.
+  // The "is negative" variant should be handled more generally because it only
+  // requires "and" rather than "andn":
+  // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
+  //
+  // This is limited to the original type to avoid producing even more bitcasts.
+  // If the bitcasts can't be eliminated, then it is unlikely that this fold
+  // will be profitable.
+  if (N->getValueType(0) == VT &&
+      supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRA)) {
+    SDValue X, Y;
+    if (Op1.hasOneUse() && Op1.getOpcode() == X86ISD::PCMPGT &&
+        isAllOnesOrAllOnesSplat(Op1.getOperand(1))) {
+      X = Op1.getOperand(0);
+      Y = Op0;
+    } else if (Op0.hasOneUse() && Op0.getOpcode() == X86ISD::PCMPGT &&
+               isAllOnesOrAllOnesSplat(Op0.getOperand(1))) {
+      X = Op0.getOperand(0);
+      Y = Op1;
+    }
+    if (X && Y) {
+      SDLoc DL(N);
+      SDValue Sra =
+          getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
+                                     VT.getScalarSizeInBits() - 1, DAG);
+      return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
+    }
+  }
+
   APInt SplatVal;
   if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
       !SplatVal.isMask())
diff --git a/llvm/test/CodeGen/X86/vector-pcmp.ll b/llvm/test/CodeGen/X86/vector-pcmp.ll
index a9a7744..d809595 100644
--- a/llvm/test/CodeGen/X86/vector-pcmp.ll
+++ b/llvm/test/CodeGen/X86/vector-pcmp.ll
@@ -640,12 +640,25 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; SSE42-NEXT: pand %xmm1, %xmm0
 ; SSE42-NEXT: retq
 ;
-; AVX-LABEL: not_signbit_mask_v2i64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: not_signbit_mask_v2i64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: not_signbit_mask_v2i64:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: not_signbit_mask_v2i64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0
+; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
 %sh = ashr <2 x i64> %x,
 %not = xor <2 x i64> %sh,
 %and = and <2 x i64> %y, %not
@@ -655,16 +668,14 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %x, <2 x i64> %y) {
 define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: not_signbit_mask_v4i32:
 ; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: not_signbit_mask_v4i32:
 ; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 %sh = ashr <4 x i32> %x,
 %not = xor <4 x i32> %sh,
@@ -675,16 +686,14 @@ define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %x, <4 x i32> %y) {
 define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %x, <8 x i16> %y) {
 ; SSE-LABEL: not_signbit_mask_v8i16:
 ; SSE: # %bb.0:
-; SSE-NEXT: pcmpeqd %xmm2, %xmm2
-; SSE-NEXT: pcmpgtw %xmm2, %xmm0
-; SSE-NEXT: pand %xmm1, %xmm0
+; SSE-NEXT: psraw $15, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: not_signbit_mask_v8i16:
 ; AVX: # %bb.0:
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsraw $15, %xmm0, %xmm0
+; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 %sh = ashr <8 x i16> %x,
 %not = xor <8 x i16> %sh,
@@ -752,9 +761,8 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %x, <4 x i64> %y) {
 ;
 ; AVX512-LABEL: not_signbit_mask_v4i64:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: 
vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm0, %ymm1, %ymm0 +; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %sh = ashr <4 x i64> %x, %not = xor <4 x i64> %sh, @@ -765,11 +773,10 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %x, <4 x i64> %y) { define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) { ; SSE-LABEL: not_signbit_mask_v8i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE-NEXT: pcmpgtd %xmm4, %xmm1 -; SSE-NEXT: pcmpgtd %xmm4, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pand %xmm3, %xmm1 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: psrad $31, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: not_signbit_mask_v8i32: @@ -783,16 +790,14 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) { ; ; AVX2-LABEL: not_signbit_mask_v8i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_v8i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %sh = ashr <8 x i32> %x, %not = xor <8 x i32> %sh, @@ -803,11 +808,10 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %x, <8 x i32> %y) { define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %x, <16 x i16> %y) { ; SSE-LABEL: not_signbit_mask_v16i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE-NEXT: pcmpgtw %xmm4, %xmm1 -; SSE-NEXT: pcmpgtw %xmm4, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pand %xmm3, %xmm1 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: psraw $15, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: not_signbit_mask_v16i16: @@ -821,16 +825,14 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %x, <16 x i16> %y) { ; ; AVX2-LABEL: not_signbit_mask_v16i16: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_v16i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm0, %ymm1, %ymm0 +; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %sh = ashr <16 x i16> %x, %not = xor <16 x i16> %sh, @@ -893,12 +895,25 @@ define <2 x i64> @ispositive_mask_v2i64(<2 x i64> %x, <2 x i64> %y) { ; SSE42-NEXT: pand %xmm1, %xmm0 ; SSE42-NEXT: retq ; -; AVX-LABEL: ispositive_mask_v2i64: -; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: ispositive_mask_v2i64: +; AVX1: # %bb.0: +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ispositive_mask_v2i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: 
vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ispositive_mask_v2i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0 +; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: retq %cmp = icmp sgt <2 x i64> %x, %mask = sext <2 x i1> %cmp to <2 x i64> %and = and <2 x i64> %mask, %y @@ -908,16 +923,14 @@ define <2 x i64> @ispositive_mask_v2i64(<2 x i64> %x, <2 x i64> %y) { define <4 x i32> @is_positive_mask_v4i32(<4 x i32> %x, <4 x i32> %y) { ; SSE-LABEL: is_positive_mask_v4i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtd %xmm2, %xmm0 -; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: is_positive_mask_v4i32: ; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %cmp = icmp sgt <4 x i32> %x, %mask = sext <4 x i1> %cmp to <4 x i32> @@ -928,16 +941,14 @@ define <4 x i32> @is_positive_mask_v4i32(<4 x i32> %x, <4 x i32> %y) { define <8 x i16> @is_positive_mask_v8i16(<8 x i16> %x, <8 x i16> %y) { ; SSE-LABEL: is_positive_mask_v8i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtw %xmm2, %xmm0 -; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: is_positive_mask_v8i16: ; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %cmp = icmp sgt <8 x i16> %x, %mask = sext <8 x i1> %cmp to <8 x i16> @@ -1005,9 +1016,8 @@ define <4 x i64> @is_positive_mask_v4i64(<4 x i64> %x, <4 x i64> %y) { ; ; AVX512-LABEL: is_positive_mask_v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %cmp = icmp sgt <4 x i64> %x, %mask = sext <4 x i1> %cmp to <4 x i64> @@ -1018,11 +1028,10 @@ define <4 x i64> @is_positive_mask_v4i64(<4 x i64> %x, <4 x i64> %y) { define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) { ; SSE-LABEL: is_positive_mask_v8i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE-NEXT: pcmpgtd %xmm4, %xmm1 -; SSE-NEXT: pcmpgtd %xmm4, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pand %xmm3, %xmm1 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: psrad $31, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: is_positive_mask_v8i32: @@ -1037,16 +1046,14 @@ define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) { ; ; AVX2-LABEL: is_positive_mask_v8i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpand %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: is_positive_mask_v8i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm0, %ymm1, %ymm0 +; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %cmp = icmp sgt <8 x i32> %x, %mask = sext <8 x i1> %cmp to <8 x 
i32> @@ -1057,11 +1064,10 @@ define <8 x i32> @is_positive_mask_v8i32(<8 x i32> %x, <8 x i32> %y) { define <16 x i16> @is_positive_mask_v16i16(<16 x i16> %x, <16 x i16> %y) { ; SSE-LABEL: is_positive_mask_v16i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE-NEXT: pcmpgtw %xmm4, %xmm1 -; SSE-NEXT: pcmpgtw %xmm4, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pand %xmm3, %xmm1 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: psraw $15, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: is_positive_mask_v16i16: @@ -1076,16 +1082,14 @@ define <16 x i16> @is_positive_mask_v16i16(<16 x i16> %x, <16 x i16> %y) { ; ; AVX2-LABEL: is_positive_mask_v16i16: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: is_positive_mask_v16i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %cmp = icmp sgt <16 x i16> %x, %mask = sext <16 x i1> %cmp to <16 x i16> @@ -1148,12 +1152,25 @@ define <2 x i64> @ispositive_mask_load_v2i64(<2 x i64> %x, <2 x i64>* %p) { ; SSE42-NEXT: pand (%rdi), %xmm0 ; SSE42-NEXT: retq ; -; AVX-LABEL: ispositive_mask_load_v2i64: -; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpand (%rdi), %xmm0, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: ispositive_mask_load_v2i64: +; AVX1: # %bb.0: +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpand (%rdi), %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: ispositive_mask_load_v2i64: +; AVX2: # %bb.0: +; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpand (%rdi), %xmm0, %xmm0 +; AVX2-NEXT: retq +; +; AVX512-LABEL: ispositive_mask_load_v2i64: +; AVX512: # %bb.0: +; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0 +; AVX512-NEXT: vpandn (%rdi), %xmm0, %xmm0 +; AVX512-NEXT: retq %cmp = icmp sgt <2 x i64> %x, %mask = sext <2 x i1> %cmp to <2 x i64> %y = load <2 x i64>, <2 x i64>* %p @@ -1164,16 +1181,14 @@ define <2 x i64> @ispositive_mask_load_v2i64(<2 x i64> %x, <2 x i64>* %p) { define <4 x i32> @is_positive_mask_load_v4i32(<4 x i32> %x, <4 x i32>* %p) { ; SSE-LABEL: is_positive_mask_load_v4i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm1, %xmm1 -; SSE-NEXT: pcmpgtd %xmm1, %xmm0 -; SSE-NEXT: pand (%rdi), %xmm0 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn (%rdi), %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: is_positive_mask_load_v4i32: ; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpand (%rdi), %xmm0, %xmm0 +; AVX-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX-NEXT: vpandn (%rdi), %xmm0, %xmm0 ; AVX-NEXT: retq %cmp = icmp sgt <4 x i32> %x, %mask = sext <4 x i1> %cmp to <4 x i32> @@ -1185,16 +1200,14 @@ define <4 x i32> @is_positive_mask_load_v4i32(<4 x i32> %x, <4 x i32>* %p) { define <8 x i16> @is_positive_mask_load_v8i16(<8 x i16> %x, <8 x i16>* %p) { ; SSE-LABEL: is_positive_mask_load_v8i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm1, %xmm1 -; SSE-NEXT: pcmpgtw %xmm1, %xmm0 -; SSE-NEXT: pand (%rdi), %xmm0 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn (%rdi), %xmm0 ; SSE-NEXT: 
retq ; ; AVX-LABEL: is_positive_mask_load_v8i16: ; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpand (%rdi), %xmm0, %xmm0 +; AVX-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX-NEXT: vpandn (%rdi), %xmm0, %xmm0 ; AVX-NEXT: retq %cmp = icmp sgt <8 x i16> %x, %mask = sext <8 x i1> %cmp to <8 x i16> @@ -1264,9 +1277,8 @@ define <4 x i64> @is_positive_mask_load_v4i64(<4 x i64> %x, <4 x i64>* %p) { ; ; AVX512-LABEL: is_positive_mask_load_v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 -; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0 -; AVX512-NEXT: vpand (%rdi), %ymm0, %ymm0 +; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0 +; AVX512-NEXT: vpandn (%rdi), %ymm0, %ymm0 ; AVX512-NEXT: retq %cmp = icmp sgt <4 x i64> %x, %mask = sext <4 x i1> %cmp to <4 x i64> @@ -1278,11 +1290,10 @@ define <4 x i64> @is_positive_mask_load_v4i64(<4 x i64> %x, <4 x i64>* %p) { define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) { ; SSE-LABEL: is_positive_mask_load_v8i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtd %xmm2, %xmm1 -; SSE-NEXT: pcmpgtd %xmm2, %xmm0 -; SSE-NEXT: pand (%rdi), %xmm0 -; SSE-NEXT: pand 16(%rdi), %xmm1 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn (%rdi), %xmm0 +; SSE-NEXT: psrad $31, %xmm1 +; SSE-NEXT: pandn 16(%rdi), %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: is_positive_mask_load_v8i32: @@ -1297,16 +1308,14 @@ define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) { ; ; AVX2-LABEL: is_positive_mask_load_v8i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 -; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vpand (%rdi), %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX2-NEXT: vpandn (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: is_positive_mask_load_v8i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 -; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0 -; AVX512-NEXT: vpand (%rdi), %ymm0, %ymm0 +; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX512-NEXT: vpandn (%rdi), %ymm0, %ymm0 ; AVX512-NEXT: retq %cmp = icmp sgt <8 x i32> %x, %mask = sext <8 x i1> %cmp to <8 x i32> @@ -1318,11 +1327,10 @@ define <8 x i32> @is_positive_mask_load_v8i32(<8 x i32> %x, <8 x i32>* %p) { define <16 x i16> @is_positive_mask_load_v16i16(<16 x i16> %x, <16 x i16>* %p) { ; SSE-LABEL: is_positive_mask_load_v16i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtw %xmm2, %xmm1 -; SSE-NEXT: pcmpgtw %xmm2, %xmm0 -; SSE-NEXT: pand (%rdi), %xmm0 -; SSE-NEXT: pand 16(%rdi), %xmm1 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn (%rdi), %xmm0 +; SSE-NEXT: psraw $15, %xmm1 +; SSE-NEXT: pandn 16(%rdi), %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: is_positive_mask_load_v16i16: @@ -1337,16 +1345,14 @@ define <16 x i16> @is_positive_mask_load_v16i16(<16 x i16> %x, <16 x i16>* %p) { ; ; AVX2-LABEL: is_positive_mask_load_v16i16: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 -; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vpand (%rdi), %ymm0, %ymm0 +; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX2-NEXT: vpandn (%rdi), %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: is_positive_mask_load_v16i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 -; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0 -; AVX512-NEXT: vpand (%rdi), %ymm0, %ymm0 +; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX512-NEXT: vpandn (%rdi), %ymm0, %ymm0 ; AVX512-NEXT: retq %cmp = icmp sgt <16 x i16> %x, %mask = sext <16 x i1> %cmp to <16 x 
i16> @@ -1451,23 +1457,20 @@ define <2 x i1> @ispositive_mask_v2i64_v2i1(<2 x i64> %x, <2 x i1> %y) { define <4 x i1> @is_positive_mask_v4i32_v4i1(<4 x i32> %x, <4 x i1> %y) { ; SSE-LABEL: is_positive_mask_v4i32_v4i1: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtd %xmm2, %xmm0 -; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX1-LABEL: is_positive_mask_v4i32_v4i1: ; AVX1: # %bb.0: -; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: is_positive_mask_v4i32_v4i1: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpand %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX2-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: is_positive_mask_v4i32_v4i1: @@ -1495,30 +1498,26 @@ define <4 x i1> @is_positive_mask_v4i32_v4i1(<4 x i32> %x, <4 x i1> %y) { define <8 x i1> @is_positive_mask_v8i16_v8i1(<8 x i16> %x, <8 x i1> %y) { ; SSE-LABEL: is_positive_mask_v8i16_v8i1: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtw %xmm2, %xmm0 -; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX1-LABEL: is_positive_mask_v8i16_v8i1: ; AVX1: # %bb.0: -; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX1-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: is_positive_mask_v8i16_v8i1: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX2-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: is_positive_mask_v8i16_v8i1: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX512F-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX512F-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX512F-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX512F-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: retq ; ; AVX512DQBW-LABEL: is_positive_mask_v8i16_v8i1: @@ -1581,9 +1580,8 @@ define <4 x i1> @is_positive_mask_v4i64_v4i1(<4 x i64> %x, <4 x i1> %y) { ; SSE2-LABEL: is_positive_mask_v4i64_v4i1: ; SSE2: # %bb.0: ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3] -; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 -; SSE2-NEXT: pcmpgtd %xmm1, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pandn %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE42-LABEL: is_positive_mask_v4i64_v4i1: diff --git a/llvm/test/CodeGen/X86/vselect-zero.ll b/llvm/test/CodeGen/X86/vselect-zero.ll index f1e67d9..e00f06c 100644 --- a/llvm/test/CodeGen/X86/vselect-zero.ll +++ b/llvm/test/CodeGen/X86/vselect-zero.ll @@ -945,23 +945,20 @@ define <16 x i8> @not_signbit_mask_v16i8(<16 x i8> %a, <16 x i8> %b) { define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) { ; SSE-LABEL: not_signbit_mask_v8i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtw %xmm2, %xmm0 -; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: not_signbit_mask_v8i16: ; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpgtw %xmm2, %xmm0, 
%xmm0 -; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_v8i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX512-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq %cond = icmp sgt <8 x i16> %a, %r = select <8 x i1> %cond, <8 x i16> %b, <8 x i16> zeroinitializer @@ -973,23 +970,20 @@ define <8 x i16> @not_signbit_mask_v8i16(<8 x i16> %a, <8 x i16> %b) { define <8 x i16> @not_signbit_mask_swap_v8i16(<8 x i16> %a, <8 x i16> %b) { ; SSE-LABEL: not_signbit_mask_swap_v8i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtw %xmm2, %xmm0 -; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: not_signbit_mask_swap_v8i16: ; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_swap_v8i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX512-NEXT: vpcmpgtw %xmm2, %xmm0, %xmm0 -; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsraw $15, %xmm0, %xmm0 +; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq %cond = icmp slt <8 x i16> %a, zeroinitializer %r = select <8 x i1> %cond, <8 x i16> zeroinitializer, <8 x i16> %b @@ -999,23 +993,20 @@ define <8 x i16> @not_signbit_mask_swap_v8i16(<8 x i16> %a, <8 x i16> %b) { define <4 x i32> @not_signbit_mask_v4i32(<4 x i32> %a, <4 x i32> %b) { ; SSE-LABEL: not_signbit_mask_v4i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm2, %xmm2 -; SSE-NEXT: pcmpgtd %xmm2, %xmm0 -; SSE-NEXT: pand %xmm1, %xmm0 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: not_signbit_mask_v4i32: ; AVX: # %bb.0: -; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_v4i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX512-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0 -; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsrad $31, %xmm0, %xmm0 +; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq %cond = icmp sgt <4 x i32> %a, %r = select <4 x i1> %cond, <4 x i32> %b, <4 x i32> zeroinitializer @@ -1047,9 +1038,8 @@ define <2 x i64> @not_signbit_mask_v2i64(<2 x i64> %a, <2 x i64> %b) { ; ; AVX512-LABEL: not_signbit_mask_v2i64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 -; AVX512-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0 -; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 +; AVX512-NEXT: vpsraq $63, %xmm0, %xmm0 +; AVX512-NEXT: vpandn %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq %cond = icmp sgt <2 x i64> %a, %r = select <2 x i1> %cond, <2 x i64> %b, <2 x i64> zeroinitializer @@ -1097,11 +1087,10 @@ define <32 x i8> @not_signbit_mask_v32i8(<32 x i8> %a, <32 x i8> %b) { define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) { ; SSE-LABEL: not_signbit_mask_v16i16: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE-NEXT: pcmpgtw %xmm4, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pcmpgtw %xmm4, %xmm1 -; 
SSE-NEXT: pand %xmm3, %xmm1 +; SSE-NEXT: psraw $15, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: psraw $15, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: not_signbit_mask_v16i16: @@ -1115,16 +1104,14 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) { ; ; AVX2-LABEL: not_signbit_mask_v16i16: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_v16i16: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsraw $15, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %cond = icmp sgt <16 x i16> %a, %r = select <16 x i1> %cond, <16 x i16> %b, <16 x i16> zeroinitializer @@ -1134,11 +1121,10 @@ define <16 x i16> @not_signbit_mask_v16i16(<16 x i16> %a, <16 x i16> %b) { define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) { ; SSE-LABEL: not_signbit_mask_v8i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE-NEXT: pcmpgtd %xmm4, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pcmpgtd %xmm4, %xmm1 -; SSE-NEXT: pand %xmm3, %xmm1 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: psrad $31, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: not_signbit_mask_v8i32: @@ -1152,16 +1138,14 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) { ; ; AVX2-LABEL: not_signbit_mask_v8i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_v8i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %cond = icmp sgt <8 x i32> %a, %r = select <8 x i1> %cond, <8 x i32> %b, <8 x i32> zeroinitializer @@ -1173,11 +1157,10 @@ define <8 x i32> @not_signbit_mask_v8i32(<8 x i32> %a, <8 x i32> %b) { define <8 x i32> @not_signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) { ; SSE-LABEL: not_signbit_mask_swap_v8i32: ; SSE: # %bb.0: -; SSE-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE-NEXT: pcmpgtd %xmm4, %xmm0 -; SSE-NEXT: pand %xmm2, %xmm0 -; SSE-NEXT: pcmpgtd %xmm4, %xmm1 -; SSE-NEXT: pand %xmm3, %xmm1 +; SSE-NEXT: psrad $31, %xmm0 +; SSE-NEXT: pandn %xmm2, %xmm0 +; SSE-NEXT: psrad $31, %xmm1 +; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: retq ; ; AVX1-LABEL: not_signbit_mask_swap_v8i32: @@ -1191,16 +1174,14 @@ define <8 x i32> @not_signbit_mask_swap_v8i32(<8 x i32> %a, <8 x i32> %b) { ; ; AVX2-LABEL: not_signbit_mask_swap_v8i32: ; AVX2: # %bb.0: -; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX2-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX2-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: not_signbit_mask_swap_v8i32: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtd %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsrad $31, %ymm0, %ymm0 +; AVX512-NEXT: 
vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %cond = icmp slt <8 x i32> %a, zeroinitializer %r = select <8 x i1> %cond, <8 x i32> zeroinitializer, <8 x i32> %b @@ -1247,9 +1228,8 @@ define <4 x i64> @not_signbit_mask_v4i64(<4 x i64> %a, <4 x i64> %b) { ; ; AVX512-LABEL: not_signbit_mask_v4i64: ; AVX512: # %bb.0: -; AVX512-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 -; AVX512-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0 -; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0 +; AVX512-NEXT: vpsraq $63, %ymm0, %ymm0 +; AVX512-NEXT: vpandn %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq %cond = icmp sgt <4 x i64> %a, %r = select <4 x i1> %cond, <4 x i64> %b, <4 x i64> zeroinitializer
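
For reference, the scalar identity behind the fold: per lane, pcmpgt X, -1 produces all-ones exactly when the lane is non-negative, which is the bitwise complement of the sign mask produced by vsrai X, BitWidth-1; andn supplies that complement for free. Below is a minimal standalone C++ sketch of this identity for 32-bit lanes. It is illustrative only and not part of the patch, and it assumes arithmetic right shift of signed integers (guaranteed by C++20, the behavior of x86 in practice).

// sanity check for the combine's scalar identity -- hypothetical, not LLVM code
// per 32-bit lane:
//   pcmpgtd X, -1  -> (x > -1) ? 0xFFFFFFFF : 0
//   vpsrad $31, X  -> x >> 31 (all-ones if x is negative, else zero)
//   vpandn A, B    -> ~A & B
#include <cassert>
#include <cstdint>
#include <cstdio>

// Lane result of the original pattern: and (pcmpgt x, -1), y.
static uint32_t cmpgt_then_and(int32_t x, uint32_t y) {
  uint32_t mask = (x > -1) ? 0xFFFFFFFFu : 0u;
  return mask & y;
}

// Lane result of the folded pattern: pandn (vsrai x, 31), y.
static uint32_t sra_then_andn(int32_t x, uint32_t y) {
  uint32_t sign = static_cast<uint32_t>(x >> 31); // arithmetic shift assumed
  return ~sign & y;
}

int main() {
  const int32_t xs[] = {INT32_MIN, -2, -1, 0, 1, 42, INT32_MAX};
  const uint32_t ys[] = {0u, 1u, 0x80000000u, 0xDEADBEEFu, 0xFFFFFFFFu};
  for (int32_t x : xs)
    for (uint32_t y : ys)
      assert(cmpgt_then_and(x, y) == sra_then_andn(x, y));
  std::puts("fold identity holds for all sampled lanes");
  return 0;
}

Note the operand order: pandn computes (NOT first-operand) AND second-operand, so the shifted sign mask must be the first operand; that is why the combine builds the ANDNP node with Sra first and Y second.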