From: Simon Pilgrim
Date: Tue, 31 Mar 2020 13:37:48 +0000 (+0100)
Subject: [X86][SSE] lowerShuffleWithPACK - extend to use chained PACKs for larger truncations
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=efe59d6717dcdf7777acb9b7a734e1a520bdf22a;p=platform%2Fupstream%2Fllvm.git

[X86][SSE] lowerShuffleWithPACK - extend to use chained PACKs for larger truncations

If canLowerByDroppingEvenElements indicates that the shuffle is an N:1 compaction pattern and the inputs are suitably sign/zero-extended, then we can use a chain of PACKSS/PACKUS to compact.

This helps avoid PSHUFB (and its mask load) for short shuffle chains; shuffle combining will still replace with a PSHUFB if we have enough shuffles, as getFauxShuffleMask can recognise PACKSS/PACKUS chains.
---

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c7c599c..7f2046d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -11358,6 +11358,72 @@ static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
   return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
                      DAG.getBitcast(PackVT, V2));
 
+  // See if we can detect a compaction mask that we can lower with a chain of
+  // PACKUS/PACKSS.
+  // TODO: Handle 256/512-bit types.
+  // TODO: Can this be merged into matchShuffleWithPACK?
+  if ((VT == MVT::v8i16 || VT == MVT::v16i8) && !Subtarget.hasAVX512()) {
+    if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, V2.isUndef())) {
+      if (VT == MVT::v8i16 && NumEvenDrops == 3)
+        return SDValue();
+      unsigned NumElts = VT.getVectorNumElements();
+      unsigned NumDstEltBits = VT.getScalarSizeInBits();
+      unsigned NumSrcEltBits = NumDstEltBits << NumEvenDrops;
+      unsigned NumPackedEltBits = NumSrcEltBits - NumDstEltBits;
+      MVT SrcSVT = MVT::getIntegerVT(NumSrcEltBits);
+      MVT SrcVT = MVT::getVectorVT(SrcSVT, 128 / NumSrcEltBits);
+      SDValue Src1 = DAG.getBitcast(SrcVT, V1);
+      SDValue Src2 = DAG.getBitcast(SrcVT, V2.isUndef() ? V1 : V2);
+
+      // Try to use PACKUS - PACKUSDW (SSE41+ only).
+      unsigned PackOpcode = ISD::DELETED_NODE;
+      if (Subtarget.hasSSE41() || NumDstEltBits == 8) {
+        KnownBits KnownV1 = DAG.computeKnownBits(Src1);
+        KnownBits KnownV2 = DAG.computeKnownBits(Src2);
+        if (KnownV1.countMinLeadingZeros() >= NumPackedEltBits &&
+            KnownV2.countMinLeadingZeros() >= NumPackedEltBits) {
+          PackOpcode = X86ISD::PACKUS;
+        }
+      }
+
+      // Try to use PACKSS.
+      if (PackOpcode == ISD::DELETED_NODE &&
+          DAG.ComputeNumSignBits(Src1) > NumPackedEltBits &&
+          DAG.ComputeNumSignBits(Src2) > NumPackedEltBits) {
+        PackOpcode = X86ISD::PACKSS;
+      }
+
+      // Repeatedly pack until we're down to the target size.
+      if (PackOpcode != ISD::DELETED_NODE) {
+        SDValue Res;
+        unsigned NumPackEltBits = NumSrcEltBits;
+        for (int i = 0; i < NumEvenDrops; ++i, NumPackEltBits >>= 1) {
+          // Pack to the largest type possible:
+          // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
+          MVT InSVT = MVT::i16, OutSVT = MVT::i8;
+          if (NumPackEltBits > 16 &&
+              (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
+            InSVT = MVT::i32;
+            OutSVT = MVT::i16;
+          }
+          MVT InVT = MVT::getVectorVT(InSVT, 128 / InSVT.getSizeInBits());
+          MVT OutVT = MVT::getVectorVT(OutSVT, 128 / OutSVT.getSizeInBits());
+          Src1 = DAG.getBitcast(InVT, i == 0 ? Src1 : Res);
+          Src2 = DAG.getBitcast(InVT, i == 0 ? Src2 : Res);
+          // If the mask doesn't need these packed elements, just use UNDEF.
+ unsigned UpperElts = NumElts >> (i + 1); + if (isUndefInRange(Mask, 0, UpperElts)) + Src1 = DAG.getUNDEF(InVT); + if (isUndefInRange(Mask, UpperElts, NumElts - UpperElts)) + Src2 = DAG.getUNDEF(InVT); + Res = DAG.getNode(PackOpcode, DL, OutVT, Src1, Src2); + } + assert(Res && Res.getValueType() == VT && "Failed to pack"); + return Res; + } + } + } + return SDValue(); } diff --git a/llvm/test/CodeGen/X86/avx-fp2int.ll b/llvm/test/CodeGen/X86/avx-fp2int.ll index ac5fcfe..195b6f6 100644 --- a/llvm/test/CodeGen/X86/avx-fp2int.ll +++ b/llvm/test/CodeGen/X86/avx-fp2int.ll @@ -7,7 +7,8 @@ define <4 x i8> @test1(<4 x double> %d) { ; CHECK-LABEL: test1: ; CHECK: ## %bb.0: ; CHECK-NEXT: vcvttpd2dq %ymm0, %xmm0 -; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; CHECK-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retl %c = fptoui <4 x double> %d to <4 x i8> @@ -17,7 +18,8 @@ define <4 x i8> @test2(<4 x double> %d) { ; CHECK-LABEL: test2: ; CHECK: ## %bb.0: ; CHECK-NEXT: vcvttpd2dq %ymm0, %xmm0 -; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; CHECK-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retl %c = fptosi <4 x double> %d to <4 x i8> diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll index 3852b0e..10d5975 100644 --- a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll +++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll @@ -8,43 +8,25 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) { ; SSE-LABEL: v8i64: ; SSE: # %bb.0: -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 +; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 ; SSE-NEXT: pcmpgtq %xmm7, %xmm3 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7] -; SSE-NEXT: pcmpgtq %xmm6, %xmm2 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7] -; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; SSE-NEXT: pcmpgtq %xmm5, %xmm1 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] +; SSE-NEXT: packssdw %xmm3, %xmm3 +; SSE-NEXT: packssdw %xmm3, %xmm3 ; SSE-NEXT: pcmpgtq %xmm4, %xmm0 -; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] -; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7] -; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7] -; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] -; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7] -; SSE-NEXT: 
punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] -; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm3 -; SSE-NEXT: packsswb %xmm0, %xmm3 -; SSE-NEXT: pmovmskb %xmm3, %eax +; SSE-NEXT: packssdw %xmm0, %xmm0 +; SSE-NEXT: packssdw %xmm0, %xmm0 +; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7] +; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm2 +; SSE-NEXT: packssdw %xmm2, %xmm2 +; SSE-NEXT: packssdw %xmm2, %xmm2 +; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm1 +; SSE-NEXT: packssdw %xmm0, %xmm1 +; SSE-NEXT: packssdw %xmm0, %xmm1 +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] +; SSE-NEXT: pand %xmm0, %xmm1 +; SSE-NEXT: packsswb %xmm0, %xmm1 +; SSE-NEXT: pmovmskb %xmm1, %eax ; SSE-NEXT: # kill: def $al killed $al killed $eax ; SSE-NEXT: retq ; @@ -120,43 +102,25 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) { define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d) { ; SSE-LABEL: v8f64: ; SSE: # %bb.0: -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11 +; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm1 +; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm2 ; SSE-NEXT: cmpltpd %xmm3, %xmm7 -; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,1,0,2,4,5,6,7] -; SSE-NEXT: cmpltpd %xmm2, %xmm6 -; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[0,1,0,2,4,5,6,7] -; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] -; SSE-NEXT: cmpltpd %xmm1, %xmm5 -; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,2,2,3,4,5,6,7] +; SSE-NEXT: packssdw %xmm7, %xmm3 +; SSE-NEXT: packssdw %xmm3, %xmm3 ; SSE-NEXT: cmpltpd %xmm0, %xmm4 -; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,2,2,3,4,5,6,7] -; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7] -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,1,0,2,4,5,6,7] -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm10[0,1,0,2,4,5,6,7] -; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,2,2,3,4,5,6,7] -; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2,2,3] -; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,2,2,3,4,5,6,7] -; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] -; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7] -; SSE-NEXT: pand %xmm0, %xmm3 -; SSE-NEXT: packsswb %xmm0, %xmm3 -; SSE-NEXT: pmovmskb %xmm3, %eax +; SSE-NEXT: packssdw %xmm0, %xmm4 +; SSE-NEXT: packssdw %xmm0, %xmm4 +; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm2 +; SSE-NEXT: packssdw %xmm2, %xmm0 +; SSE-NEXT: packssdw %xmm0, %xmm0 +; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm1 +; SSE-NEXT: packssdw %xmm0, %xmm1 +; SSE-NEXT: packssdw %xmm0, %xmm1 +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; SSE-NEXT: pand %xmm4, %xmm1 +; 
SSE-NEXT: packsswb %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
 ; SSE-NEXT: # kill: def $al killed $al killed $eax
 ; SSE-NEXT: retq
 ;
@@ -336,36 +300,24 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
 define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
 ; SSE-LABEL: v16i32:
 ; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2
 ; SSE-NEXT: pcmpgtd %xmm7, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm7, %xmm3
-; SSE-NEXT: pcmpgtd %xmm6, %xmm2
-; SSE-NEXT: pshufb %xmm7, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: pcmpgtd %xmm5, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm3, %xmm1
+; SSE-NEXT: packssdw %xmm3, %xmm3
+; SSE-NEXT: packsswb %xmm3, %xmm3
 ; SSE-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE-NEXT: pshufb %xmm3, %xmm0
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT: pshufb %xmm7, %xmm11
-; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: pshufb %xmm7, %xmm9
-; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
-; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: pshufb %xmm3, %xmm10
-; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: pshufb %xmm3, %xmm8
-; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm8
-; SSE-NEXT: pmovmskb %xmm8, %eax
+; SSE-NEXT: packssdw %xmm0, %xmm0
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: packssdw %xmm2, %xmm2
+; SSE-NEXT: packsswb %xmm2, %xmm2
+; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: packssdw %xmm0, %xmm1
+; SSE-NEXT: packsswb %xmm0, %xmm1
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: pmovmskb %xmm1, %eax
 ; SSE-NEXT: # kill: def $ax killed $ax killed $eax
 ; SSE-NEXT: retq
 ;
@@ -448,36 +400,24 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
 define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x float> %d) {
 ; SSE-LABEL: v16f32:
 ; SSE: # %bb.0:
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11
+; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2
 ; SSE-NEXT: cmpltps %xmm3, %xmm7
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm3, %xmm7
-; SSE-NEXT: cmpltps %xmm2, %xmm6
-; SSE-NEXT: pshufb %xmm3, %xmm6
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; SSE-NEXT: cmpltps %xmm1, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm1, %xmm5
+; SSE-NEXT: packssdw %xmm7, %xmm3
+; SSE-NEXT: packsswb %xmm3, %xmm3
 ; SSE-NEXT: cmpltps %xmm0, %xmm4
-; SSE-NEXT: pshufb %xmm1, %xmm4
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = 
xmm4[0,1,2,3],xmm6[4,5,6,7] -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm11 -; SSE-NEXT: pshufb %xmm3, %xmm11 -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm9 -; SSE-NEXT: pshufb %xmm3, %xmm9 -; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1] -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm10 -; SSE-NEXT: pshufb %xmm1, %xmm10 -; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm8 -; SSE-NEXT: pshufb %xmm1, %xmm8 -; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1] -; SSE-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4,5,6,7] -; SSE-NEXT: pand %xmm4, %xmm8 -; SSE-NEXT: pmovmskb %xmm8, %eax +; SSE-NEXT: packssdw %xmm0, %xmm4 +; SSE-NEXT: packsswb %xmm0, %xmm4 +; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm2 +; SSE-NEXT: packssdw %xmm2, %xmm0 +; SSE-NEXT: packsswb %xmm0, %xmm0 +; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm1 +; SSE-NEXT: packssdw %xmm0, %xmm1 +; SSE-NEXT: packsswb %xmm0, %xmm1 +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; SSE-NEXT: pand %xmm4, %xmm1 +; SSE-NEXT: pmovmskb %xmm1, %eax ; SSE-NEXT: # kill: def $ax killed $ax killed $eax ; SSE-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll index b82d781..f2f9cc3 100644 --- a/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll +++ b/llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll @@ -5263,7 +5263,8 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %p, <4 x i32> %mask) ; SSE4-NEXT: pxor %xmm2, %xmm2 ; SSE4-NEXT: pminsd {{.*}}(%rip), %xmm0 ; SSE4-NEXT: pmaxsd {{.*}}(%rip), %xmm0 -; SSE4-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE4-NEXT: packssdw %xmm0, %xmm0 +; SSE4-NEXT: packsswb %xmm0, %xmm0 ; SSE4-NEXT: pcmpeqd %xmm1, %xmm2 ; SSE4-NEXT: movmskps %xmm2, %eax ; SSE4-NEXT: xorl $15, %eax @@ -5301,7 +5302,8 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %p, <4 x i32> %mask) ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vmovmskps %xmm1, %eax ; AVX1-NEXT: xorl $15, %eax @@ -5341,7 +5343,8 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %p, <4 x i32> %mask) ; AVX2-NEXT: vpminsd %xmm3, %xmm0, %xmm0 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [4294967168,4294967168,4294967168,4294967168] ; AVX2-NEXT: vpmaxsd %xmm3, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1 ; AVX2-NEXT: vmovmskps %xmm1, %eax ; AVX2-NEXT: xorl $15, %eax diff --git a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll index 84958fe..e1ee5d1 100644 --- a/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll +++ b/llvm/test/CodeGen/X86/masked_store_trunc_usat.ll @@ -4436,19 +4436,15 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %p, <8 x i32> %mask) ; ; SSE4-LABEL: truncstore_v8i32_v8i8: ; SSE4: # %bb.0: -; SSE4-NEXT: pxor %xmm4, %xmm4 -; SSE4-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255] -; SSE4-NEXT: pminud %xmm5, %xmm0 -; SSE4-NEXT: pminud %xmm5, %xmm1 -; SSE4-NEXT: movdqa {{.*#+}} xmm5 = 
<0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; SSE4-NEXT: pshufb %xmm5, %xmm1 -; SSE4-NEXT: pshufb %xmm5, %xmm0 -; SSE4-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE4-NEXT: pcmpeqd %xmm4, %xmm3 -; SSE4-NEXT: pcmpeqd %xmm1, %xmm1 -; SSE4-NEXT: pxor %xmm1, %xmm3 -; SSE4-NEXT: pcmpeqd %xmm4, %xmm2 -; SSE4-NEXT: pxor %xmm1, %xmm2 +; SSE4-NEXT: pxor %xmm1, %xmm1 +; SSE4-NEXT: pminud {{.*}}(%rip), %xmm0 +; SSE4-NEXT: packusdw %xmm0, %xmm0 +; SSE4-NEXT: packuswb %xmm0, %xmm0 +; SSE4-NEXT: pcmpeqd %xmm1, %xmm3 +; SSE4-NEXT: pcmpeqd %xmm4, %xmm4 +; SSE4-NEXT: pxor %xmm4, %xmm3 +; SSE4-NEXT: pcmpeqd %xmm1, %xmm2 +; SSE4-NEXT: pxor %xmm4, %xmm2 ; SSE4-NEXT: packssdw %xmm3, %xmm2 ; SSE4-NEXT: packsswb %xmm0, %xmm2 ; SSE4-NEXT: pmovmskb %xmm2, %eax @@ -4511,14 +4507,9 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %p, <8 x i32> %mask) ; ; AVX1-LABEL: truncstore_v8i32_v8i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255] -; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm3 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2 -; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2 @@ -4588,13 +4579,10 @@ define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %p, <8 x i32> %mask) ; AVX2-LABEL: truncstore_v8i32_v8i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpminud %ymm3, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3 -; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0 -; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1] +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [255,255,255,255] +; AVX2-NEXT: vpminud %xmm3, %xmm0, %xmm0 +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vmovmskps %ymm1, %eax ; AVX2-NEXT: notl %eax @@ -5030,7 +5018,8 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %p, <4 x i32> %mask) ; SSE4: # %bb.0: ; SSE4-NEXT: pxor %xmm2, %xmm2 ; SSE4-NEXT: pminud {{.*}}(%rip), %xmm0 -; SSE4-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE4-NEXT: packusdw %xmm0, %xmm0 +; SSE4-NEXT: packuswb %xmm0, %xmm0 ; SSE4-NEXT: pcmpeqd %xmm1, %xmm2 ; SSE4-NEXT: movmskps %xmm2, %eax ; SSE4-NEXT: xorl $15, %eax @@ -5067,7 +5056,8 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %p, <4 x i32> %mask) ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vmovmskps %xmm1, %eax ; AVX1-NEXT: xorl $15, %eax @@ -5105,7 +5095,8 @@ define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %p, <4 x i32> %mask) ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpbroadcastd 
{{.*#+}} xmm3 = [255,255,255,255] ; AVX2-NEXT: vpminud %xmm3, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1 ; AVX2-NEXT: vmovmskps %xmm1, %eax ; AVX2-NEXT: xorl $15, %eax diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll index 2903cbd..d4ada24 100644 --- a/llvm/test/CodeGen/X86/psubus.ll +++ b/llvm/test/CodeGen/X86/psubus.ll @@ -607,49 +607,37 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind { ; SSE2: # %bb.0: # %vector.ph ; SSE2-NEXT: movdqa %xmm0, %xmm5 ; SSE2-NEXT: pxor %xmm0, %xmm0 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7] -; SSE2-NEXT: movdqa %xmm6, %xmm8 +; SSE2-NEXT: movdqa %xmm5, %xmm7 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15] +; SSE2-NEXT: movdqa %xmm7, %xmm8 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7] -; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15] -; SSE2-NEXT: movdqa %xmm5, %xmm10 -; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3] -; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] +; SSE2-NEXT: movdqa %xmm5, %xmm9 +; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7] +; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3] +; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7] ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648] -; SSE2-NEXT: movdqa %xmm4, %xmm9 -; SSE2-NEXT: pxor %xmm0, %xmm9 -; SSE2-NEXT: psubd %xmm5, %xmm4 -; SSE2-NEXT: por %xmm0, %xmm5 -; SSE2-NEXT: pcmpgtd %xmm9, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [255,0,255,0,255,0,255,0] -; SSE2-NEXT: pand %xmm9, %xmm5 -; SSE2-NEXT: movdqa %xmm3, %xmm7 +; SSE2-NEXT: movdqa %xmm4, %xmm6 +; SSE2-NEXT: pxor %xmm0, %xmm6 +; SSE2-NEXT: psubd %xmm7, %xmm4 +; SSE2-NEXT: por %xmm0, %xmm7 +; SSE2-NEXT: pcmpgtd %xmm6, %xmm7 +; SSE2-NEXT: packssdw %xmm7, %xmm6 +; SSE2-NEXT: movdqa %xmm1, %xmm7 ; SSE2-NEXT: pxor %xmm0, %xmm7 -; SSE2-NEXT: psubd %xmm10, %xmm3 -; SSE2-NEXT: por %xmm0, %xmm10 -; SSE2-NEXT: pcmpgtd %xmm7, %xmm10 -; SSE2-NEXT: pand %xmm9, %xmm10 -; SSE2-NEXT: packuswb %xmm5, %xmm10 -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: pxor %xmm0, %xmm5 -; SSE2-NEXT: psubd %xmm6, %xmm2 -; SSE2-NEXT: por %xmm0, %xmm6 -; SSE2-NEXT: pcmpgtd %xmm5, %xmm6 -; SSE2-NEXT: pand %xmm9, %xmm6 -; SSE2-NEXT: movdqa %xmm1, %xmm5 -; SSE2-NEXT: pxor %xmm0, %xmm5 -; SSE2-NEXT: por %xmm8, %xmm0 -; SSE2-NEXT: pcmpgtd %xmm5, %xmm0 -; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: packuswb %xmm6, %xmm0 
-; SSE2-NEXT: packuswb %xmm10, %xmm0
-; SSE2-NEXT: psubd %xmm8, %xmm1
-; SSE2-NEXT: pand %xmm9, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm3
+; SSE2-NEXT: por %xmm5, %xmm0
+; SSE2-NEXT: pcmpgtd %xmm7, %xmm0
+; SSE2-NEXT: packssdw %xmm0, %xmm0
+; SSE2-NEXT: packsswb %xmm6, %xmm0
+; SSE2-NEXT: psubd %xmm5, %xmm1
+; SSE2-NEXT: psubd %xmm9, %xmm2
+; SSE2-NEXT: psubd %xmm8, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm3
 ; SSE2-NEXT: packuswb %xmm4, %xmm3
-; SSE2-NEXT: pand %xmm9, %xmm2
-; SSE2-NEXT: pand %xmm9, %xmm1
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pand %xmm5, %xmm1
 ; SSE2-NEXT: packuswb %xmm2, %xmm1
 ; SSE2-NEXT: packuswb %xmm3, %xmm1
 ; SSE2-NEXT: pandn %xmm1, %xmm0
@@ -657,47 +645,32 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
 ;
 ; SSSE3-LABEL: test14:
 ; SSSE3: # %bb.0: # %vector.ph
-; SSSE3-NEXT: pxor %xmm7, %xmm7
-; SSSE3-NEXT: movdqa %xmm0, %xmm11
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm7[8],xmm11[9],xmm7[9],xmm11[10],xmm7[10],xmm11[11],xmm7[11],xmm11[12],xmm7[12],xmm11[13],xmm7[13],xmm11[14],xmm7[14],xmm11[15],xmm7[15]
-; SSSE3-NEXT: movdqa %xmm11, %xmm8
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm7[4],xmm11[5],xmm7[5],xmm11[6],xmm7[6],xmm11[7],xmm7[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: movdqa %xmm0, %xmm10
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm2, %xmm9
-; SSSE3-NEXT: pxor %xmm7, %xmm9
-; SSSE3-NEXT: psubd %xmm0, %xmm2
 ; SSSE3-NEXT: movdqa %xmm0, %xmm5
-; SSSE3-NEXT: por %xmm7, %xmm5
-; SSSE3-NEXT: pcmpgtd %xmm9, %xmm5
-; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSSE3-NEXT: pshufb %xmm9, %xmm5
-; SSSE3-NEXT: movdqa %xmm1, %xmm6
-; SSSE3-NEXT: pxor %xmm7, %xmm6
-; SSSE3-NEXT: psubd %xmm10, %xmm1
-; SSSE3-NEXT: movdqa %xmm10, %xmm0
-; SSSE3-NEXT: por %xmm7, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm0
-; SSSE3-NEXT: pshufb %xmm9, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
-; SSSE3-NEXT: movdqa %xmm4, %xmm5
-; SSSE3-NEXT: pxor %xmm7, %xmm5
-; SSSE3-NEXT: psubd %xmm11, %xmm4
-; SSSE3-NEXT: por %xmm7, %xmm11
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm11
-; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSSE3-NEXT: pshufb %xmm5, %xmm11
-; SSSE3-NEXT: movdqa %xmm3, %xmm6
-; SSSE3-NEXT: pxor %xmm7, %xmm6
-; SSSE3-NEXT: por %xmm8, %xmm7
+; SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSSE3-NEXT: movdqa %xmm5, %xmm7
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSSE3-NEXT: movdqa %xmm7, %xmm8
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSSE3-NEXT: movdqa %xmm5, %xmm9
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm4, %xmm6
+; SSSE3-NEXT: pxor %xmm0, %xmm6
+; SSSE3-NEXT: psubd %xmm7, %xmm4
+; SSSE3-NEXT: por %xmm0, %xmm7
 ; SSSE3-NEXT: pcmpgtd %xmm6, %xmm7
-; SSSE3-NEXT: pshufb %xmm5, %xmm7
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
+; SSSE3-NEXT: packssdw %xmm7, %xmm6
+; SSSE3-NEXT: movdqa %xmm1, %xmm7
+; SSSE3-NEXT: pxor %xmm0, %xmm7
+; SSSE3-NEXT: por %xmm5, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm0
+; SSSE3-NEXT: packssdw %xmm0, %xmm0
+; SSSE3-NEXT: packsswb %xmm6, %xmm0
+; SSSE3-NEXT: psubd %xmm5, %xmm1
+; SSSE3-NEXT: psubd %xmm9, %xmm2
 ; SSSE3-NEXT: psubd %xmm8, %xmm3
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
 ; SSSE3-NEXT: pand %xmm5, %xmm4
@@ -712,43 +685,31 @@ define <16 x i8> @test14(<16 x i8> %x, <16 x i32> %y) nounwind {
 ;
 ; SSE41-LABEL: test14:
 ; SSE41: # %bb.0: # %vector.ph
-; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,2,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm11 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,0,1]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,2,3]
 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm9 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE41-NEXT: pmovzxbd {{.*#+}} xmm10 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm0[3,1,2,3]
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
+; SSE41-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; SSE41-NEXT: movdqa %xmm1, %xmm6
+; SSE41-NEXT: pmaxud %xmm5, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm1, %xmm6
+; SSE41-NEXT: pcmpeqd %xmm10, %xmm10
+; SSE41-NEXT: pxor %xmm10, %xmm6
+; SSE41-NEXT: packssdw %xmm0, %xmm6
+; SSE41-NEXT: packsswb %xmm0, %xmm6
 ; SSE41-NEXT: movdqa %xmm4, %xmm0
-; SSE41-NEXT: pmaxud %xmm10, %xmm0
+; SSE41-NEXT: pmaxud %xmm7, %xmm0
 ; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm6, %xmm6
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm7 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm5
-; SSE41-NEXT: pmaxud %xmm9, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm5
-; SSE41-NEXT: pxor %xmm6, %xmm5
-; SSE41-NEXT: pshufb %xmm7, %xmm5
-; SSE41-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: pmaxud %xmm8, %xmm0
-; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm12 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb 
%xmm12, %xmm0 -; SSE41-NEXT: movdqa %xmm2, %xmm7 -; SSE41-NEXT: pmaxud %xmm11, %xmm7 -; SSE41-NEXT: pcmpeqd %xmm2, %xmm7 -; SSE41-NEXT: pxor %xmm6, %xmm7 -; SSE41-NEXT: pshufb %xmm12, %xmm7 -; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1] -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4,5,6,7] -; SSE41-NEXT: psubd %xmm11, %xmm2 -; SSE41-NEXT: psubd %xmm8, %xmm1 -; SSE41-NEXT: psubd %xmm9, %xmm3 -; SSE41-NEXT: psubd %xmm10, %xmm4 +; SSE41-NEXT: pxor %xmm10, %xmm0 +; SSE41-NEXT: packssdw %xmm0, %xmm0 +; SSE41-NEXT: packsswb %xmm0, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm6[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: psubd %xmm9, %xmm2 +; SSE41-NEXT: psubd %xmm5, %xmm1 +; SSE41-NEXT: psubd %xmm8, %xmm3 +; SSE41-NEXT: psubd %xmm7, %xmm4 ; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] ; SSE41-NEXT: pand %xmm5, %xmm4 ; SSE41-NEXT: pand %xmm5, %xmm3 diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll index ed2db51..bd23c35 100644 --- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll +++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll @@ -2638,23 +2638,22 @@ define <4 x i8> @strict_vector_fptosi_v4f32_to_v4i8(<4 x float> %a) #0 { ; SSE-32-LABEL: strict_vector_fptosi_v4f32_to_v4i8: ; SSE-32: # %bb.0: ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 -; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0 -; SSE-32-NEXT: packuswb %xmm0, %xmm0 -; SSE-32-NEXT: packuswb %xmm0, %xmm0 +; SSE-32-NEXT: packssdw %xmm0, %xmm0 +; SSE-32-NEXT: packsswb %xmm0, %xmm0 ; SSE-32-NEXT: retl ; ; SSE-64-LABEL: strict_vector_fptosi_v4f32_to_v4i8: ; SSE-64: # %bb.0: ; SSE-64-NEXT: cvttps2dq %xmm0, %xmm0 -; SSE-64-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-64-NEXT: packuswb %xmm0, %xmm0 -; SSE-64-NEXT: packuswb %xmm0, %xmm0 +; SSE-64-NEXT: packssdw %xmm0, %xmm0 +; SSE-64-NEXT: packsswb %xmm0, %xmm0 ; SSE-64-NEXT: retq ; ; AVX-LABEL: strict_vector_fptosi_v4f32_to_v4i8: ; AVX: # %bb.0: ; AVX-NEXT: vcvttps2dq %xmm0, %xmm0 -; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX-NEXT: ret{{[l|q]}} ; ; AVX512F-LABEL: strict_vector_fptosi_v4f32_to_v4i8: @@ -2689,7 +2688,6 @@ define <4 x i8> @strict_vector_fptoui_v4f32_to_v4i8(<4 x float> %a) #0 { ; SSE-32-LABEL: strict_vector_fptoui_v4f32_to_v4i8: ; SSE-32: # %bb.0: ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 -; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: retl @@ -2697,7 +2695,6 @@ define <4 x i8> @strict_vector_fptoui_v4f32_to_v4i8(<4 x float> %a) #0 { ; SSE-64-LABEL: strict_vector_fptoui_v4f32_to_v4i8: ; SSE-64: # %bb.0: ; SSE-64-NEXT: cvttps2dq %xmm0, %xmm0 -; SSE-64-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE-64-NEXT: packuswb %xmm0, %xmm0 ; SSE-64-NEXT: packuswb %xmm0, %xmm0 ; SSE-64-NEXT: retq @@ -2705,7 +2702,8 @@ define <4 x i8> @strict_vector_fptoui_v4f32_to_v4i8(<4 x float> %a) #0 { ; AVX-LABEL: strict_vector_fptoui_v4f32_to_v4i8: ; AVX: # %bb.0: ; AVX-NEXT: vcvttps2dq %xmm0, %xmm0 -; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX-NEXT: ret{{[l|q]}} ; ; AVX512F-LABEL: strict_vector_fptoui_v4f32_to_v4i8: diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll index 0f9fc8d..8b69a9e 100644 --- 
a/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll +++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-256.ll @@ -1250,7 +1250,8 @@ define <4 x i8> @strict_vector_fptosi_v4f64_to_v4i8(<4 x double> %a) #0 { ; AVX-LABEL: strict_vector_fptosi_v4f64_to_v4i8: ; AVX: # %bb.0: ; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0 -; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: ret{{[l|q]}} ; @@ -1290,7 +1291,8 @@ define <4 x i8> @strict_vector_fptoui_v4f64_to_v4i8(<4 x double> %a) #0 { ; AVX-LABEL: strict_vector_fptoui_v4f64_to_v4i8: ; AVX: # %bb.0: ; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0 -; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: ret{{[l|q]}} ; diff --git a/llvm/test/CodeGen/X86/vec_cast2.ll b/llvm/test/CodeGen/X86/vec_cast2.ll index e43216d..ed703f1 100644 --- a/llvm/test/CodeGen/X86/vec_cast2.ll +++ b/llvm/test/CodeGen/X86/vec_cast2.ll @@ -122,7 +122,8 @@ define <4 x i8> @cvt_v4f32_v4i8(<4 x float> %src) { ; CHECK-LABEL: cvt_v4f32_v4i8: ; CHECK: ## %bb.0: ; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0 -; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; CHECK-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: retl %res = fptosi <4 x float> %src to <4 x i8> ret <4 x i8> %res @@ -167,7 +168,8 @@ define <4 x i8> @cvt_v4f32_v4u8(<4 x float> %src) { ; CHECK-LABEL: cvt_v4f32_v4u8: ; CHECK: ## %bb.0: ; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0 -; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; CHECK-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; CHECK-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: retl %res = fptoui <4 x float> %src to <4 x i8> ret <4 x i8> %res diff --git a/llvm/test/CodeGen/X86/vector-trunc-packus.ll b/llvm/test/CodeGen/X86/vector-trunc-packus.ll index c74b879..9d044d5 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-packus.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-packus.ll @@ -5212,7 +5212,8 @@ define <4 x i8> @trunc_packus_v4i32_v4i8(<4 x i32> %a0) "min-legal-vector-width" ; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0 ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: pmaxsd %xmm1, %xmm0 -; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE41-NEXT: packusdw %xmm0, %xmm0 +; SSE41-NEXT: packuswb %xmm0, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: trunc_packus_v4i32_v4i8: @@ -5220,7 +5221,8 @@ define <4 x i8> @trunc_packus_v4i32_v4i8(<4 x i32> %a0) "min-legal-vector-width" ; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: trunc_packus_v4i32_v4i8: @@ -5229,7 +5231,8 @@ define <4 x i8> @trunc_packus_v4i32_v4i8(<4 x i32> %a0) "min-legal-vector-width" ; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: trunc_packus_v4i32_v4i8: @@ -5316,7 +5319,8 @@ define void 
@trunc_packus_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0 ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: pmaxsd %xmm0, %xmm1 -; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE41-NEXT: packusdw %xmm0, %xmm1 +; SSE41-NEXT: packuswb %xmm0, %xmm1 ; SSE41-NEXT: movd %xmm1, (%rdi) ; SSE41-NEXT: retq ; @@ -5325,7 +5329,8 @@ define void @trunc_packus_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vmovd %xmm0, (%rdi) ; AVX1-NEXT: retq ; @@ -5335,7 +5340,8 @@ define void @trunc_packus_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vmovd %xmm0, (%rdi) ; AVX2-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll index 665e3fe..bed4553 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-ssat.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-ssat.ll @@ -5080,14 +5080,16 @@ define <4 x i8> @trunc_ssat_v4i32_v4i8(<4 x i32> %a0) { ; SSE41: # %bb.0: ; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0 ; SSE41-NEXT: pmaxsd {{.*}}(%rip), %xmm0 -; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE41-NEXT: packssdw %xmm0, %xmm0 +; SSE41-NEXT: packsswb %xmm0, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: trunc_ssat_v4i32_v4i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: trunc_ssat_v4i32_v4i8: @@ -5096,7 +5098,8 @@ define <4 x i8> @trunc_ssat_v4i32_v4i8(<4 x i32> %a0) { ; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168] ; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: trunc_ssat_v4i32_v4i8: @@ -5178,7 +5181,8 @@ define void @trunc_ssat_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; SSE41: # %bb.0: ; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0 ; SSE41-NEXT: pmaxsd {{.*}}(%rip), %xmm0 -; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE41-NEXT: packssdw %xmm0, %xmm0 +; SSE41-NEXT: packsswb %xmm0, %xmm0 ; SSE41-NEXT: movd %xmm0, (%rdi) ; SSE41-NEXT: retq ; @@ -5186,7 +5190,8 @@ define void @trunc_ssat_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; AVX1: # %bb.0: ; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0 ; AVX1-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vmovd %xmm0, (%rdi) ; AVX1-NEXT: retq ; @@ -5196,7 +5201,8 @@ define void 
@trunc_ssat_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168] ; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vmovd %xmm0, (%rdi) ; AVX2-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-trunc-usat.ll b/llvm/test/CodeGen/X86/vector-trunc-usat.ll index 8434bac..586514e 100644 --- a/llvm/test/CodeGen/X86/vector-trunc-usat.ll +++ b/llvm/test/CodeGen/X86/vector-trunc-usat.ll @@ -3694,20 +3694,23 @@ define <4 x i8> @trunc_usat_v4i32_v4i8(<4 x i32> %a0) { ; SSE41-LABEL: trunc_usat_v4i32_v4i8: ; SSE41: # %bb.0: ; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0 -; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE41-NEXT: packusdw %xmm0, %xmm0 +; SSE41-NEXT: packuswb %xmm0, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: trunc_usat_v4i32_v4i8: ; AVX1: # %bb.0: ; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: trunc_usat_v4i32_v4i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255] ; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: trunc_usat_v4i32_v4i8: @@ -3776,14 +3779,16 @@ define void @trunc_usat_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; SSE41-LABEL: trunc_usat_v4i32_v4i8_store: ; SSE41: # %bb.0: ; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0 -; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; SSE41-NEXT: packusdw %xmm0, %xmm0 +; SSE41-NEXT: packuswb %xmm0, %xmm0 ; SSE41-NEXT: movd %xmm0, (%rdi) ; SSE41-NEXT: retq ; ; AVX1-LABEL: trunc_usat_v4i32_v4i8_store: ; AVX1: # %bb.0: ; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vmovd %xmm0, (%rdi) ; AVX1-NEXT: retq ; @@ -3791,7 +3796,8 @@ define void @trunc_usat_v4i32_v4i8_store(<4 x i32> %a0, <4 x i8> *%p1) { ; AVX2: # %bb.0: ; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255] ; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vmovd %xmm0, (%rdi) ; AVX2-NEXT: retq ; @@ -3880,37 +3886,25 @@ define <8 x i8> @trunc_usat_v8i32_v8i8(<8 x i32> %a0) { ; ; SSE41-LABEL: trunc_usat_v8i32_v8i8: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255] -; SSE41-NEXT: pminud %xmm2, %xmm0 -; SSE41-NEXT: pminud %xmm2, %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; SSE41-NEXT: pshufb %xmm2, %xmm1 -; SSE41-NEXT: pshufb %xmm2, %xmm0 -; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0 +; SSE41-NEXT: packusdw %xmm0, %xmm0 +; SSE41-NEXT: packuswb %xmm0, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: trunc_usat_v8i32_v8i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa 
{{.*#+}} xmm1 = [255,255,255,255] -; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: trunc_usat_v8i32_v8i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255] +; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -4000,39 +3994,27 @@ define void @trunc_usat_v8i32_v8i8_store(<8 x i32> %a0, <8 x i8> *%p1) { ; ; SSE41-LABEL: trunc_usat_v8i32_v8i8_store: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255] -; SSE41-NEXT: pminud %xmm2, %xmm0 -; SSE41-NEXT: pminud %xmm2, %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; SSE41-NEXT: pshufb %xmm2, %xmm1 -; SSE41-NEXT: pshufb %xmm2, %xmm0 -; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0 +; SSE41-NEXT: packusdw %xmm0, %xmm0 +; SSE41-NEXT: packuswb %xmm0, %xmm0 ; SSE41-NEXT: movq %xmm0, (%rdi) ; SSE41-NEXT: retq ; ; AVX1-LABEL: trunc_usat_v8i32_v8i8_store: ; AVX1: # %bb.0: -; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255] -; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm2 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX1-NEXT: vmovq %xmm0, (%rdi) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: trunc_usat_v8i32_v8i8_store: ; AVX2: # %bb.0: -; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255] -; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255] +; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0 +; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0 ; AVX2-NEXT: vmovq %xmm0, (%rdi) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq
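
For reference, here is a minimal standalone sketch of the chained-PACK truncation the new lowering emits for the v4i32 -> v4i8 tests above. It is not part of the patch and the function names are hypothetical; it only illustrates why the known-bits/sign-bits preconditions make the saturating packs value-preserving. Compile with -msse4.1 (the unsigned variant needs PACKUSDW).

#include <immintrin.h>

// Signed case: each i32 lane is sign-extended from i8 (more than 24 sign
// bits), so the saturating packs never clamp and the low 4 bytes hold the
// truncated values - matching the PACKSSDW+PACKSSWB pairs in the CHECK lines.
__m128i trunc_v4i32_v4i8_signed(__m128i v) {
  __m128i w = _mm_packs_epi32(v, v);   // packssdw: 4 x i32 -> 8 x i16
  return _mm_packs_epi16(w, w);        // packsswb: 8 x i16 -> 16 x i8
}

// Unsigned case: each i32 lane has at least 24 leading zero bits, so
// PACKUSDW (SSE4.1) followed by PACKUSWB is likewise lossless.
__m128i trunc_v4i32_v4i8_unsigned(__m128i v) {
  __m128i w = _mm_packus_epi32(v, v);  // packusdw: 4 x i32 -> 8 x i16
  return _mm_packus_epi16(w, w);       // packuswb: 8 x i16 -> 16 x i8
}

Two register-to-register packs replace the previous PSHUFB, which needed a shuffle-mask constant loaded from the constant pool; when enough shuffles accumulate, getFauxShuffleMask can still combine such a PACK chain back into a single PSHUFB.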