Extend lowerShuffleWithPACK/matchShuffleWithPACK/createPackShuffleMask to handle compaction-style shuffle masks that can be lowered to chains of PACKSS/PACKUS if their inputs are suitably sign/zero extended.

This helps avoid PSHUFB (and its mask load) for short shuffle chains; shuffle combining will still replace the chain with a PSHUFB once there are enough shuffles, since getFauxShuffleMask should recognise the PACKSS/PACKUS chains.
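For example (taken from the updated tests below), a sign-saturating v4i32 -> v4i8 truncation that previously used a PSHUFB with a constant-pool mask of <0,4,8,12,u,..> now lowers as a two-stage pack chain:

  vpackssdw %xmm0, %xmm0, %xmm0
  vpacksswb %xmm0, %xmm0, %xmm0
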
}
/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
+/// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
/// Note: This ignores saturation, so inputs must be checked first.
static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
-                                  bool Unary) {
+                                  bool Unary, unsigned NumStages = 1) {
  assert(Mask.empty() && "Expected an empty shuffle mask vector");
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
  unsigned Offset = Unary ? 0 : NumElts;
+  unsigned Repetitions = 1u << (NumStages - 1);
+  unsigned Increment = 1u << NumStages;
+  assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
-    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
-      Mask.push_back(Elt + (Lane * NumEltsPerLane));
-    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
-      Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
+    for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
+      for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
+        Mask.push_back(Elt + (Lane * NumEltsPerLane));
+      for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
+        Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
+    }
  }
}
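// Worked example (illustrative, derived from the code above): for
// VT = MVT::v16i8, Unary = true and NumStages = 2 we get Repetitions = 2 and
// Increment = 4, so the mask is <0,4,8,12, 0,4,8,12, 0,4,8,12, 0,4,8,12> -
// exactly the byte layout left by PACKSSDW followed by PACKSSWB on the same
// (in-range) operand. With Unary = false the second run of each stage is
// offset by NumElts, giving <0,4,8,12,16,20,24,28, 0,4,8,12,16,20,24,28>.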
// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
+// Checks for compaction shuffle masks if MaxStages > 1.
// TODO: Add support for matching multiple PACKSS/PACKUS stages.
static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
                                 unsigned &PackOpcode, ArrayRef<int> TargetMask,
                                 SelectionDAG &DAG,
-                                 const X86Subtarget &Subtarget) {
+                                 const X86Subtarget &Subtarget,
+                                 unsigned MaxStages = 1) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned BitSize = VT.getScalarSizeInBits();
+  assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
+         "Illegal maximum compaction");
  auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
    unsigned NumSrcBits = PackVT.getScalarSizeInBits();
    return false;
  };
-  MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
-  MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
+  // Attempt to match against wider and wider compaction patterns.
+  for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
+    MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
+    MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
-  // Try binary shuffle.
-  SmallVector<int, 32> BinaryMask;
-  createPackShuffleMask(VT, BinaryMask, false);
-  if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
-    if (MatchPACK(V1, V2, PackVT))
-      return true;
+    // Try binary shuffle.
+    SmallVector<int, 32> BinaryMask;
+    createPackShuffleMask(VT, BinaryMask, false, NumStages);
+    if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
+      if (MatchPACK(V1, V2, PackVT))
+        return true;
-  // Try unary shuffle.
-  SmallVector<int, 32> UnaryMask;
-  createPackShuffleMask(VT, UnaryMask, true);
-  if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
-    if (MatchPACK(V1, V1, PackVT))
-      return true;
+    // Try unary shuffle.
+    SmallVector<int, 32> UnaryMask;
+    createPackShuffleMask(VT, UnaryMask, true, NumStages);
+    if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
+      if (MatchPACK(V1, V1, PackVT))
+        return true;
+  }
  return false;
}
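// For illustration: with VT = MVT::v16i8 and MaxStages = 3 the loop above
// tries PackVT = v8i16 (a single PACK stage), then v4i32 (two stages), then
// v2i64 (three stages); the (BitSize << MaxStages) <= 64 assert keeps the
// widest candidate source element at i64.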
const X86Subtarget &Subtarget) {
  MVT PackVT;
  unsigned PackOpcode;
-  if (matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
-                           Subtarget))
-    return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
-                       DAG.getBitcast(PackVT, V2));
+  unsigned SizeBits = VT.getSizeInBits();
+  unsigned EltBits = VT.getScalarSizeInBits();
+  unsigned MaxStages = Log2_32(64 / EltBits);
+  if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
+                            Subtarget, MaxStages))
+    return SDValue();
-  return SDValue();
+  unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
+  unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
+
+  // Don't lower multi-stage packs on AVX512, truncation is better.
+  if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
+    return SDValue();
+
+  // Pack to the largest type possible:
+  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
+  unsigned MaxPackBits = 16;
+  if (CurrentEltBits > 16 &&
+      (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
+    MaxPackBits = 32;
+
+  // Repeatedly pack down to the target size.
+  SDValue Res;
+  for (unsigned i = 0; i != NumStages; ++i) {
+    unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
+    unsigned NumSrcElts = SizeBits / SrcEltBits;
+    MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
+    MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
+    MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
+    MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
+    Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
+                      DAG.getBitcast(SrcVT, V2));
+    V1 = V2 = Res;
+    CurrentEltBits /= 2;
+  }
+  assert(Res && Res.getValueType() == VT &&
+         "Failed to lower compaction shuffle");
+  return Res;
}
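// For illustration: lowering a v16i8 compaction whose inputs are suitably
// sign-extended i32 data matches PackVT = v4i32, so NumStages = 2; the first
// iteration emits PACKSSDW (v4i32 -> v8i16) and the second PACKSSWB
// (v8i16 -> v16i8), giving the packssdw+packsswb pairs seen in the updated
// tests below.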
/// Try to emit a bitmask instruction for a shuffle.
; CHECK-LABEL: test1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttpd2dq %ymm0, %xmm0
-; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl
%c = fptoui <4 x double> %d to <4 x i8>
; CHECK-LABEL: test2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttpd2dq %ymm0, %xmm0
-; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl
%c = fptosi <4 x double> %d to <4 x i8>
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pcmpgtq %xmm7, %xmm3
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
; SSE-NEXT: pcmpgtq %xmm6, %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: packssdw %xmm3, %xmm2
+; SSE-NEXT: packssdw %xmm2, %xmm2
; SSE-NEXT: pcmpgtq %xmm5, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packssdw %xmm0, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: packssdw %xmm11, %xmm10
+; SSE-NEXT: packssdw %xmm10, %xmm1
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: packsswb %xmm0, %xmm3
-; SSE-NEXT: pmovmskb %xmm3, %eax
+; SSE-NEXT: packssdw %xmm9, %xmm8
+; SSE-NEXT: packssdw %xmm0, %xmm8
+; SSE-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm8
+; SSE-NEXT: packsswb %xmm0, %xmm8
+; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: cmpltpd %xmm3, %xmm7
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,1,0,2,4,5,6,7]
; SSE-NEXT: cmpltpd %xmm2, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm6[0,1,0,2,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: packssdw %xmm7, %xmm6
+; SSE-NEXT: packssdw %xmm6, %xmm2
; SSE-NEXT: cmpltpd %xmm1, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,2,2,3,4,5,6,7]
; SSE-NEXT: cmpltpd %xmm0, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: packssdw %xmm5, %xmm4
+; SSE-NEXT: packssdw %xmm0, %xmm4
+; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,1,0,2,4,5,6,7]
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm10[0,1,0,2,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT: packssdw %xmm11, %xmm10
+; SSE-NEXT: packssdw %xmm10, %xmm0
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,2,2,3,4,5,6,7]
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,2,2,3,4,5,6,7]
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: packsswb %xmm0, %xmm3
-; SSE-NEXT: pmovmskb %xmm3, %eax
+; SSE-NEXT: packssdw %xmm9, %xmm8
+; SSE-NEXT: packssdw %xmm0, %xmm8
+; SSE-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm0[4,5,6,7]
+; SSE-NEXT: pand %xmm4, %xmm8
+; SSE-NEXT: packsswb %xmm0, %xmm8
+; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: # kill: def $al killed $al killed $eax
; SSE-NEXT: retq
;
; SSE-LABEL: v16i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
-; SSE-NEXT: movdqa {{.*#+}} xmm7 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm7, %xmm3
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
-; SSE-NEXT: pshufb %xmm7, %xmm2
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: pcmpgtd %xmm5, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm3, %xmm1
; SSE-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE-NEXT: pshufb %xmm3, %xmm0
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT: pshufb %xmm7, %xmm11
-; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: pshufb %xmm7, %xmm9
-; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: pshufb %xmm3, %xmm10
+; SSE-NEXT: packssdw %xmm11, %xmm10
+; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: pshufb %xmm3, %xmm8
-; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4,5,6,7]
+; SSE-NEXT: packssdw %xmm9, %xmm8
+; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE-LABEL: v16f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm9
+; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movaps {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: cmpltps %xmm3, %xmm7
-; SSE-NEXT: movdqa {{.*#+}} xmm3 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm3, %xmm7
; SSE-NEXT: cmpltps %xmm2, %xmm6
-; SSE-NEXT: pshufb %xmm3, %xmm6
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE-NEXT: packssdw %xmm7, %xmm6
; SSE-NEXT: cmpltps %xmm1, %xmm5
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE-NEXT: pshufb %xmm1, %xmm5
; SSE-NEXT: cmpltps %xmm0, %xmm4
-; SSE-NEXT: pshufb %xmm1, %xmm4
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
+; SSE-NEXT: packssdw %xmm5, %xmm4
+; SSE-NEXT: packsswb %xmm6, %xmm4
; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm11
-; SSE-NEXT: pshufb %xmm3, %xmm11
-; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm9
-; SSE-NEXT: pshufb %xmm3, %xmm9
-; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm10
-; SSE-NEXT: pshufb %xmm1, %xmm10
+; SSE-NEXT: packssdw %xmm11, %xmm10
+; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm8
-; SSE-NEXT: pshufb %xmm1, %xmm8
-; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
-; SSE-NEXT: pblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4,5,6,7]
+; SSE-NEXT: packssdw %xmm9, %xmm8
+; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: # kill: def $ax killed $ax killed $eax
; SSE4-NEXT: pxor %xmm2, %xmm2
; SSE4-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE4-NEXT: pmaxsd {{.*}}(%rip), %xmm0
-; SSE4-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE4-NEXT: packssdw %xmm0, %xmm0
+; SSE4-NEXT: packsswb %xmm0, %xmm0
; SSE4-NEXT: pcmpeqd %xmm1, %xmm2
; SSE4-NEXT: movmskps %xmm2, %eax
; SSE4-NEXT: xorl $15, %eax
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovmskps %xmm1, %eax
; AVX1-NEXT: xorl $15, %eax
; AVX2-NEXT: vpminsd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [4294967168,4294967168,4294967168,4294967168]
; AVX2-NEXT: vpmaxsd %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovmskps %xmm1, %eax
; AVX2-NEXT: xorl $15, %eax
; SSE4: # %bb.0:
; SSE4-NEXT: pxor %xmm4, %xmm4
; SSE4-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255]
-; SSE4-NEXT: pminud %xmm5, %xmm0
; SSE4-NEXT: pminud %xmm5, %xmm1
-; SSE4-NEXT: movdqa {{.*#+}} xmm5 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE4-NEXT: pshufb %xmm5, %xmm1
-; SSE4-NEXT: pshufb %xmm5, %xmm0
-; SSE4-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE4-NEXT: pminud %xmm5, %xmm0
+; SSE4-NEXT: packusdw %xmm1, %xmm0
+; SSE4-NEXT: packuswb %xmm0, %xmm0
; SSE4-NEXT: pcmpeqd %xmm4, %xmm3
; SSE4-NEXT: pcmpeqd %xmm1, %xmm1
; SSE4-NEXT: pxor %xmm1, %xmm3
;
; AVX1-LABEL: truncstore_v8i32_v8i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255]
-; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255]
+; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpminud %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpminud %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; AVX2-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vmovmskps %ymm1, %eax
; AVX2-NEXT: notl %eax
; SSE4: # %bb.0:
; SSE4-NEXT: pxor %xmm2, %xmm2
; SSE4-NEXT: pminud {{.*}}(%rip), %xmm0
-; SSE4-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE4-NEXT: packusdw %xmm0, %xmm0
+; SSE4-NEXT: packuswb %xmm0, %xmm0
; SSE4-NEXT: pcmpeqd %xmm1, %xmm2
; SSE4-NEXT: movmskps %xmm2, %eax
; SSE4-NEXT: xorl $15, %eax
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vmovmskps %xmm1, %eax
; AVX1-NEXT: xorl $15, %eax
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [255,255,255,255]
; AVX2-NEXT: vpminud %xmm3, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovmskps %xmm1, %eax
; AVX2-NEXT: xorl $15, %eax
; SSE2-NEXT: psubd %xmm5, %xmm4
; SSE2-NEXT: por %xmm0, %xmm5
; SSE2-NEXT: pcmpgtd %xmm9, %xmm5
-; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [255,0,255,0,255,0,255,0]
-; SSE2-NEXT: pand %xmm9, %xmm5
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: pxor %xmm0, %xmm7
; SSE2-NEXT: psubd %xmm10, %xmm3
; SSE2-NEXT: por %xmm0, %xmm10
; SSE2-NEXT: pcmpgtd %xmm7, %xmm10
-; SSE2-NEXT: pand %xmm9, %xmm10
-; SSE2-NEXT: packuswb %xmm5, %xmm10
+; SSE2-NEXT: packssdw %xmm5, %xmm10
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: pxor %xmm0, %xmm5
; SSE2-NEXT: psubd %xmm6, %xmm2
; SSE2-NEXT: por %xmm0, %xmm6
; SSE2-NEXT: pcmpgtd %xmm5, %xmm6
-; SSE2-NEXT: pand %xmm9, %xmm6
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: pxor %xmm0, %xmm5
; SSE2-NEXT: por %xmm8, %xmm0
; SSE2-NEXT: pcmpgtd %xmm5, %xmm0
-; SSE2-NEXT: pand %xmm9, %xmm0
-; SSE2-NEXT: packuswb %xmm6, %xmm0
-; SSE2-NEXT: packuswb %xmm10, %xmm0
+; SSE2-NEXT: packssdw %xmm6, %xmm0
+; SSE2-NEXT: packsswb %xmm10, %xmm0
; SSE2-NEXT: psubd %xmm8, %xmm1
-; SSE2-NEXT: pand %xmm9, %xmm4
-; SSE2-NEXT: pand %xmm9, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm5, %xmm4
+; SSE2-NEXT: pand %xmm5, %xmm3
; SSE2-NEXT: packuswb %xmm4, %xmm3
-; SSE2-NEXT: pand %xmm9, %xmm2
-; SSE2-NEXT: pand %xmm9, %xmm1
+; SSE2-NEXT: pand %xmm5, %xmm2
+; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: packuswb %xmm3, %xmm1
; SSE2-NEXT: pandn %xmm1, %xmm0
;
; SSSE3-LABEL: test14:
; SSSE3: # %bb.0: # %vector.ph
-; SSSE3-NEXT: pxor %xmm7, %xmm7
-; SSSE3-NEXT: movdqa %xmm0, %xmm11
-; SSSE3-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm7[8],xmm11[9],xmm7[9],xmm11[10],xmm7[10],xmm11[11],xmm7[11],xmm11[12],xmm7[12],xmm11[13],xmm7[13],xmm11[14],xmm7[14],xmm11[15],xmm7[15]
-; SSSE3-NEXT: movdqa %xmm11, %xmm8
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm7[4],xmm11[5],xmm7[5],xmm11[6],xmm7[6],xmm11[7],xmm7[7]
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: movdqa %xmm0, %xmm10
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3]
-; SSSE3-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
-; SSSE3-NEXT: movdqa {{.*#+}} xmm7 = [2147483648,2147483648,2147483648,2147483648]
-; SSSE3-NEXT: movdqa %xmm2, %xmm9
-; SSSE3-NEXT: pxor %xmm7, %xmm9
-; SSSE3-NEXT: psubd %xmm0, %xmm2
; SSSE3-NEXT: movdqa %xmm0, %xmm5
-; SSSE3-NEXT: por %xmm7, %xmm5
+; SSSE3-NEXT: pxor %xmm0, %xmm0
+; SSSE3-NEXT: movdqa %xmm5, %xmm6
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSSE3-NEXT: movdqa %xmm6, %xmm8
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSSE3-NEXT: movdqa %xmm5, %xmm10
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [2147483648,2147483648,2147483648,2147483648]
+; SSSE3-NEXT: movdqa %xmm4, %xmm9
+; SSSE3-NEXT: pxor %xmm0, %xmm9
+; SSSE3-NEXT: psubd %xmm5, %xmm4
+; SSSE3-NEXT: por %xmm0, %xmm5
; SSSE3-NEXT: pcmpgtd %xmm9, %xmm5
-; SSSE3-NEXT: movdqa {{.*#+}} xmm9 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSSE3-NEXT: pshufb %xmm9, %xmm5
-; SSSE3-NEXT: movdqa %xmm1, %xmm6
-; SSSE3-NEXT: pxor %xmm7, %xmm6
-; SSSE3-NEXT: psubd %xmm10, %xmm1
-; SSSE3-NEXT: movdqa %xmm10, %xmm0
-; SSSE3-NEXT: por %xmm7, %xmm0
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm0
-; SSSE3-NEXT: pshufb %xmm9, %xmm0
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
-; SSSE3-NEXT: movdqa %xmm4, %xmm5
-; SSSE3-NEXT: pxor %xmm7, %xmm5
-; SSSE3-NEXT: psubd %xmm11, %xmm4
-; SSSE3-NEXT: por %xmm7, %xmm11
-; SSSE3-NEXT: pcmpgtd %xmm5, %xmm11
-; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSSE3-NEXT: pshufb %xmm5, %xmm11
-; SSSE3-NEXT: movdqa %xmm3, %xmm6
-; SSSE3-NEXT: pxor %xmm7, %xmm6
-; SSSE3-NEXT: por %xmm8, %xmm7
-; SSSE3-NEXT: pcmpgtd %xmm6, %xmm7
-; SSSE3-NEXT: pshufb %xmm5, %xmm7
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1]
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3]
-; SSSE3-NEXT: psubd %xmm8, %xmm3
+; SSSE3-NEXT: movdqa %xmm3, %xmm7
+; SSSE3-NEXT: pxor %xmm0, %xmm7
+; SSSE3-NEXT: psubd %xmm10, %xmm3
+; SSSE3-NEXT: por %xmm0, %xmm10
+; SSSE3-NEXT: pcmpgtd %xmm7, %xmm10
+; SSSE3-NEXT: packssdw %xmm5, %xmm10
+; SSSE3-NEXT: movdqa %xmm2, %xmm5
+; SSSE3-NEXT: pxor %xmm0, %xmm5
+; SSSE3-NEXT: psubd %xmm6, %xmm2
+; SSSE3-NEXT: por %xmm0, %xmm6
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm6
+; SSSE3-NEXT: movdqa %xmm1, %xmm5
+; SSSE3-NEXT: pxor %xmm0, %xmm5
+; SSSE3-NEXT: por %xmm8, %xmm0
+; SSSE3-NEXT: pcmpgtd %xmm5, %xmm0
+; SSSE3-NEXT: packssdw %xmm6, %xmm0
+; SSSE3-NEXT: packsswb %xmm10, %xmm0
+; SSSE3-NEXT: psubd %xmm8, %xmm1
; SSSE3-NEXT: movdqa {{.*#+}} xmm5 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSSE3-NEXT: pand %xmm5, %xmm4
; SSSE3-NEXT: pand %xmm5, %xmm3
; SSE41-NEXT: pcmpeqd %xmm4, %xmm0
; SSE41-NEXT: pcmpeqd %xmm6, %xmm6
; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm7 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm7, %xmm0
-; SSE41-NEXT: movdqa %xmm3, %xmm5
-; SSE41-NEXT: pmaxud %xmm9, %xmm5
-; SSE41-NEXT: pcmpeqd %xmm3, %xmm5
-; SSE41-NEXT: pxor %xmm6, %xmm5
-; SSE41-NEXT: pshufb %xmm7, %xmm5
-; SSE41-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
+; SSE41-NEXT: movdqa %xmm3, %xmm7
+; SSE41-NEXT: pmaxud %xmm9, %xmm7
+; SSE41-NEXT: pcmpeqd %xmm3, %xmm7
+; SSE41-NEXT: pxor %xmm6, %xmm7
+; SSE41-NEXT: packssdw %xmm0, %xmm7
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: pmaxud %xmm8, %xmm0
; SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; SSE41-NEXT: pxor %xmm6, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm12 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm12, %xmm0
-; SSE41-NEXT: movdqa %xmm2, %xmm7
-; SSE41-NEXT: pmaxud %xmm11, %xmm7
-; SSE41-NEXT: pcmpeqd %xmm2, %xmm7
-; SSE41-NEXT: pxor %xmm6, %xmm7
-; SSE41-NEXT: pshufb %xmm12, %xmm7
-; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4,5,6,7]
+; SSE41-NEXT: movdqa %xmm2, %xmm5
+; SSE41-NEXT: pmaxud %xmm11, %xmm5
+; SSE41-NEXT: pcmpeqd %xmm2, %xmm5
+; SSE41-NEXT: pxor %xmm6, %xmm5
+; SSE41-NEXT: packssdw %xmm5, %xmm0
+; SSE41-NEXT: packsswb %xmm7, %xmm0
; SSE41-NEXT: psubd %xmm11, %xmm2
; SSE41-NEXT: psubd %xmm8, %xmm1
; SSE41-NEXT: psubd %xmm9, %xmm3
; SSE-32-LABEL: strict_vector_fptosi_v4f32_to_v4i8:
; SSE-32: # %bb.0:
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
-; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0
-; SSE-32-NEXT: packuswb %xmm0, %xmm0
-; SSE-32-NEXT: packuswb %xmm0, %xmm0
+; SSE-32-NEXT: packssdw %xmm0, %xmm0
+; SSE-32-NEXT: packsswb %xmm0, %xmm0
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: strict_vector_fptosi_v4f32_to_v4i8:
; SSE-64: # %bb.0:
; SSE-64-NEXT: cvttps2dq %xmm0, %xmm0
-; SSE-64-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-64-NEXT: packuswb %xmm0, %xmm0
-; SSE-64-NEXT: packuswb %xmm0, %xmm0
+; SSE-64-NEXT: packssdw %xmm0, %xmm0
+; SSE-64-NEXT: packsswb %xmm0, %xmm0
; SSE-64-NEXT: retq
;
; AVX-LABEL: strict_vector_fptosi_v4f32_to_v4i8:
; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: strict_vector_fptosi_v4f32_to_v4i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttps2dq %xmm0, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: strict_vector_fptosi_v4f32_to_v4i8:
; AVX512DQ-LABEL: strict_vector_fptosi_v4f32_to_v4i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2dq %xmm0, %xmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512VLDQ-LABEL: strict_vector_fptosi_v4f32_to_v4i8:
; SSE-32-LABEL: strict_vector_fptoui_v4f32_to_v4i8:
; SSE-32: # %bb.0:
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
-; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl
; SSE-64-LABEL: strict_vector_fptoui_v4f32_to_v4i8:
; SSE-64: # %bb.0:
; SSE-64-NEXT: cvttps2dq %xmm0, %xmm0
-; SSE-64-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-64-NEXT: packuswb %xmm0, %xmm0
; SSE-64-NEXT: packuswb %xmm0, %xmm0
; SSE-64-NEXT: retq
; AVX-LABEL: strict_vector_fptoui_v4f32_to_v4i8:
; AVX: # %bb.0:
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: strict_vector_fptoui_v4f32_to_v4i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttps2dq %xmm0, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512VL-LABEL: strict_vector_fptoui_v4f32_to_v4i8:
; AVX512DQ-LABEL: strict_vector_fptoui_v4f32_to_v4i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2dq %xmm0, %xmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX512VLDQ-LABEL: strict_vector_fptoui_v4f32_to_v4i8:
; AVX-LABEL: strict_vector_fptosi_v4f64_to_v4i8:
; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: strict_vector_fptosi_v4f64_to_v4i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512DQ-LABEL: strict_vector_fptosi_v4f64_to_v4i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; AVX-LABEL: strict_vector_fptoui_v4f64_to_v4i8:
; AVX: # %bb.0:
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: ret{{[l|q]}}
;
; AVX512F-LABEL: strict_vector_fptoui_v4f64_to_v4i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX512F-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: ret{{[l|q]}}
;
; AVX512DQ-LABEL: strict_vector_fptoui_v4f64_to_v4i8:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttpd2dq %ymm0, %xmm0
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: ret{{[l|q]}}
;
; CHECK-LABEL: cvt_v4f32_v4i8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0
-; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retl
%res = fptosi <4 x float> %src to <4 x i8>
ret <4 x i8> %res
; CHECK-LABEL: cvt_v4f32_v4u8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0
-; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retl
%res = fptoui <4 x float> %src to <4 x i8>
ret <4 x i8> %res
; AVX512F-NEXT: vpsrad $25, %zmm1, %zmm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = <u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12>
-; AVX512F-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = <0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u>
-; AVX512F-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
-; AVX512F-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpshufb %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512F-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpacksswb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpacksswb %ymm0, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrad $25, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrad $25, %zmm1, %zmm1
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = <u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12>
-; AVX512BW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm4
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = <0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u>
-; AVX512BW-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
-; AVX512BW-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512BW-NEXT: vpshufb %ymm5, %ymm0, %ymm0
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpacksswb %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_ashr_00_04_08_12_64_68_72_76_00_04_08_12_64_68_72_76_16_20_24_28_80_84_88_92_16_20_24_28_80_84_88_92_32_36_40_44_96_100_104_108_32_36_40_44_96_100_104_108_48_52_56_60_112_116_120_124_48_52_56_60_112_116_120_124:
; AVX512DQ-NEXT: vpsrad $25, %zmm1, %zmm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = <u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12>
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = <0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u>
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512DQ-NEXT: vpackssdw %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpacksswb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpacksswb %ymm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_ashr_00_04_08_12_64_68_72_76_00_04_08_12_64_68_72_76_16_20_24_28_80_84_88_92_16_20_24_28_80_84_88_92_32_36_40_44_96_100_104_108_32_36_40_44_96_100_104_108_48_52_56_60_112_116_120_124_48_52_56_60_112_116_120_124:
; AVX512VBMI: # %bb.0:
-; AVX512VBMI-NEXT: vpsrad $25, %zmm0, %zmm2
+; AVX512VBMI-NEXT: vpsrad $25, %zmm0, %zmm0
; AVX512VBMI-NEXT: vpsrad $25, %zmm1, %zmm1
-; AVX512VBMI-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,4,8,12,64,68,72,76,0,4,8,12,64,68,72,76,16,20,24,28,80,84,88,92,16,20,24,28,80,84,88,92,32,36,40,44,96,100,104,108,32,36,40,44,96,100,104,108,48,52,56,60,112,116,120,124,48,52,56,60,112,116,120,124]
-; AVX512VBMI-NEXT: vpermi2b %zmm1, %zmm2, %zmm0
+; AVX512VBMI-NEXT: vpackssdw %zmm1, %zmm0, %zmm0
+; AVX512VBMI-NEXT: vpacksswb %zmm0, %zmm0, %zmm0
; AVX512VBMI-NEXT: retq
%1 = ashr <16 x i32> %a0, <i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25>
%2 = ashr <16 x i32> %a1, <i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25>
; AVX512F-NEXT: vpsrld $25, %zmm1, %zmm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = <u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12>
-; AVX512F-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = <0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u>
-; AVX512F-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
-; AVX512F-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpshufb %ymm5, %ymm0, %ymm0
-; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512F-NEXT: vpackusdw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpackuswb %ymm0, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsrld $25, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrld $25, %zmm1, %zmm1
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = <u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12>
-; AVX512BW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm4
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm5 = <0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u>
-; AVX512BW-NEXT: vpshufb %ymm5, %ymm4, %ymm4
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
-; AVX512BW-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512BW-NEXT: vpshufb %ymm5, %ymm0, %ymm0
-; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpackuswb %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_lshr_00_04_08_12_64_68_72_76_00_04_08_12_64_68_72_76_16_20_24_28_80_84_88_092_16_20_24_28_80_84_88_92_32_36_40_44_96_100_104_108_32_36_40_44_96_100_104_108_48_52_56_60_112_116_120_124_48_52_56_60_112_116_120_124:
; AVX512DQ-NEXT: vpsrld $25, %zmm1, %zmm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = <u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12>
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = <0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u,0,4,8,12,u,u,u,u>
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpshufb %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512DQ-NEXT: vpackusdw %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm0, %ymm0, %ymm0
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VBMI-LABEL: shuffle_v64i8_lshr_00_04_08_12_64_68_72_76_00_04_08_12_64_68_72_76_16_20_24_28_80_84_88_092_16_20_24_28_80_84_88_92_32_36_40_44_96_100_104_108_32_36_40_44_96_100_104_108_48_52_56_60_112_116_120_124_48_52_56_60_112_116_120_124:
; AVX512VBMI: # %bb.0:
-; AVX512VBMI-NEXT: vpsrld $25, %zmm0, %zmm2
+; AVX512VBMI-NEXT: vpsrld $25, %zmm0, %zmm0
; AVX512VBMI-NEXT: vpsrld $25, %zmm1, %zmm1
-; AVX512VBMI-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,4,8,12,64,68,72,76,0,4,8,12,64,68,72,76,16,20,24,28,80,84,88,92,16,20,24,28,80,84,88,92,32,36,40,44,96,100,104,108,32,36,40,44,96,100,104,108,48,52,56,60,112,116,120,124,48,52,56,60,112,116,120,124]
-; AVX512VBMI-NEXT: vpermi2b %zmm1, %zmm2, %zmm0
+; AVX512VBMI-NEXT: vpackusdw %zmm1, %zmm0, %zmm0
+; AVX512VBMI-NEXT: vpackuswb %zmm0, %zmm0, %zmm0
; AVX512VBMI-NEXT: retq
%1 = lshr <16 x i32> %a0, <i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25>
%2 = lshr <16 x i32> %a1, <i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25, i32 25>
; SSE41: # %bb.0:
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pmaxsd %xmm1, %xmm0
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: pmaxsd %xmm0, %xmm1
+; SSE41-NEXT: packusdw %xmm0, %xmm1
+; SSE41-NEXT: packuswb %xmm1, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_packus_v4i32_v4i8:
; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_packus_v4i32_v4i8:
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_packus_v4i32_v4i8:
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pxor %xmm1, %xmm1
; SSE41-NEXT: pmaxsd %xmm0, %xmm1
-; SSE41-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: packusdw %xmm0, %xmm1
+; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: movd %xmm1, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; SSE41: # %bb.0:
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmaxsd {{.*}}(%rip), %xmm0
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: packssdw %xmm0, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_ssat_v4i32_v4i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_ssat_v4i32_v4i8:
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168]
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_ssat_v4i32_v4i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pminsd {{.*}}(%rip), %xmm0
; SSE41-NEXT: pmaxsd {{.*}}(%rip), %xmm0
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: packssdw %xmm0, %xmm0
+; SSE41-NEXT: packsswb %xmm0, %xmm0
; SSE41-NEXT: movd %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX1: # %bb.0:
; AVX1-NEXT: vpminsd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpmaxsd {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967168,4294967168,4294967168,4294967168]
; AVX2-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; SSE41-LABEL: trunc_usat_v4i32_v4i8:
; SSE41: # %bb.0:
; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: packusdw %xmm0, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_usat_v4i32_v4i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc_usat_v4i32_v4i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_usat_v4i32_v4i8:
; SSE41-LABEL: trunc_usat_v4i32_v4i8_store:
; SSE41: # %bb.0:
; SSE41-NEXT: pminud {{.*}}(%rip), %xmm0
-; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; SSE41-NEXT: packusdw %xmm0, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm0
; SSE41-NEXT: movd %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_usat_v4i32_v4i8_store:
; AVX1: # %bb.0:
; AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX1-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [255,255,255,255]
; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; SSE41-LABEL: trunc_usat_v8i32_v8i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
-; SSE41-NEXT: pminud %xmm2, %xmm0
; SSE41-NEXT: pminud %xmm2, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm2, %xmm1
-; SSE41-NEXT: pshufb %xmm2, %xmm0
-; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: pminud %xmm2, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_usat_v8i32_v8i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255]
+; AVX1-NEXT: vpminud %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; SSE41-LABEL: trunc_usat_v8i32_v8i8_store:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255]
-; SSE41-NEXT: pminud %xmm2, %xmm0
; SSE41-NEXT: pminud %xmm2, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; SSE41-NEXT: pshufb %xmm2, %xmm1
-; SSE41-NEXT: pshufb %xmm2, %xmm0
-; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE41-NEXT: pminud %xmm2, %xmm0
+; SSE41-NEXT: packusdw %xmm1, %xmm0
+; SSE41-NEXT: packuswb %xmm0, %xmm0
; SSE41-NEXT: movq %xmm0, (%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc_usat_v8i32_v8i8_store:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [255,255,255,255]
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255]
+; AVX1-NEXT: vpminud %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpminud %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovq %xmm0, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq