return false;
}
+ /// Return true if it is profitable to transform an integer
+ /// multiplication-by-constant into simpler operations like shifts and adds.
+ /// This may be true if the target does not directly support the
+ /// multiplication operation for the specified type, or if the sequence of
+ /// simpler ops is faster than the multiply.
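+ /// For example, x * 9 can be rewritten as (x << 3) + x, and x * 7 as
+ /// (x << 3) - x.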
+ virtual bool decomposeMulByConstant(EVT VT, SDValue C) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
getShiftAmountTy(N0.getValueType()))));
}
+ // Try to transform multiply-by-(power-of-2 +/- 1) into shift and add/sub.
+ // Examples: x * 33 --> (x << 5) + x
+ // x * 15 --> (x << 4) - x
+ if (N1IsConst && TLI.decomposeMulByConstant(VT, N1)) {
+ // TODO: Negative constants can be handled by negating the result.
+ // TODO: We could handle more general decomposition of any constant by
+ // having the target set a limit on number of ops and making a
+ // callback to determine that sequence (similar to sqrt expansion).
+ unsigned MathOp = ISD::DELETED_NODE;
+ if ((ConstValue1 - 1).isPowerOf2())
+ MathOp = ISD::ADD;
+ else if ((ConstValue1 + 1).isPowerOf2())
+ MathOp = ISD::SUB;
+
+ if (MathOp != ISD::DELETED_NODE) {
+ unsigned ShAmt = MathOp == ISD::ADD ? (ConstValue1 - 1).logBase2()
+ : (ConstValue1 + 1).logBase2();
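+ // e.g. 33 = 32 + 1 gives ShAmt = 5; 15 = 16 - 1 gives ShAmt = 4.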
+ assert(ShAmt > 0 && ShAmt < VT.getScalarSizeInBits() &&
+ "Not expecting multiply-by-constant that could have simplified");
+ SDLoc DL(N);
+ SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, N0,
+ DAG.getConstant(ShAmt, DL, VT));
+ return DAG.getNode(MathOp, DL, VT, Shl, N0);
+ }
+ }
+
// (mul (shl X, c1), c2) -> (mul X, c2 << c1)
if (N0.getOpcode() == ISD::SHL &&
isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
return true;
}
+bool X86TargetLowering::decomposeMulByConstant(EVT VT, SDValue C) const {
+ // TODO: We handle scalars using custom code, but generic combining could make
+ // that unnecessary.
+ APInt MulC;
+ if (!ISD::isConstantSplatVector(C.getNode(), MulC))
+ return false;
+
+ // If vector multiply is legal, assume that's faster than shl + add/sub.
+ // TODO: Multiply is a complex op with higher latency and lower throughput in
+ // most implementations, so this check could be loosened based on type
+ // and/or a CPU attribute.
+ if (isOperationLegal(ISD::MUL, VT))
+ return false;
+
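+ // Allow only constants of the form 2^N + 1 or 2^N - 1 (e.g. 7, 9, 15, 17).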
+ return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2();
+}
+
bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const {
if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
bool convertSelectOfConstantsToMath(EVT VT) const override;
+ bool decomposeMulByConstant(EVT VT, SDValue C) const override;
+
/// Return true if EXTRACT_SUBVECTOR is cheap for this result type
/// with this index.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrld $2, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [5,5,5,5]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pslld $2, %xmm1
+; CHECK-SSE2-NEXT: paddd %xmm2, %xmm1
+; CHECK-SSE2-NEXT: psubd %xmm1, %xmm0
; CHECK-SSE2-NEXT: pcmpeqd {{.*}}(%rip), %xmm0
; CHECK-SSE2-NEXT: psrld $31, %xmm0
; CHECK-SSE2-NEXT: retq
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrld $2, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [5,5,5,5]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pslld $2, %xmm1
+; CHECK-SSE2-NEXT: paddd %xmm2, %xmm1
+; CHECK-SSE2-NEXT: psubd %xmm1, %xmm0
; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE2-NEXT: psrld $31, %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrld $2, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [5,5,5,5]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pslld $2, %xmm1
+; CHECK-SSE2-NEXT: paddd %xmm2, %xmm1
+; CHECK-SSE2-NEXT: psubd %xmm1, %xmm0
; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE2-NEXT: psrld $31, %xmm0
; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,3,2,3]
; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-SSE2-NEXT: psrld $2, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [5,5,5,5]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
+; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1
+; CHECK-SSE2-NEXT: pslld $2, %xmm1
+; CHECK-SSE2-NEXT: paddd %xmm2, %xmm1
+; CHECK-SSE2-NEXT: psubd %xmm1, %xmm0
; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE2-NEXT: psrld $31, %xmm0
; SSE2-NEXT: psrld $31, %xmm1
; SSE2-NEXT: psrad $2, %xmm2
; SSE2-NEXT: paddd %xmm1, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [7,7,7,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: psubd %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: pslld $3, %xmm1
+; SSE2-NEXT: psubd %xmm1, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_4i32:
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: paddb %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE2-NEXT: pmullw %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
-; SSE2-NEXT: psubb %xmm1, %xmm0
+; SSE2-NEXT: psllw $3, %xmm2
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT: psubb %xmm2, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_16i8:
; SSE41-NEXT: psrlw $7, %xmm1
; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
; SSE41-NEXT: paddb %xmm2, %xmm1
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE41-NEXT: pmullw %xmm3, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm4, %xmm1
-; SSE41-NEXT: pmullw %xmm3, %xmm2
-; SSE41-NEXT: pand %xmm4, %xmm2
-; SSE41-NEXT: packuswb %xmm1, %xmm2
-; SSE41-NEXT: psubb %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psllw $3, %xmm2
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT: psubb %xmm2, %xmm1
+; SSE41-NEXT: paddb %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_16i8:
; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm2
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_16i8:
; AVX2NOBW-NEXT: vpsrlw $7, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2NOBW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX2NOBW-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2NOBW-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpsllw $3, %xmm1, %xmm2
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2NOBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX2NOBW-NEXT: vzeroupper
; AVX2NOBW-NEXT: retq
;
; AVX512BW-NEXT: vpsrlw $7, %xmm1, %xmm1
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vpsllw $3, %xmm1, %xmm2
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%res = srem <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
; AVX1-NEXT: vpsrld $31, %xmm2, %xmm4
; AVX1-NEXT: vpsrad $2, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7]
-; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpslld $3, %xmm2, %xmm4
+; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuldq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmuldq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $31, %xmm2, %xmm3
; AVX1-NEXT: vpsrad $2, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpslld $3, %xmm2, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX1-NEXT: vpsrlw $15, %xmm3, %xmm4
; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
; AVX1-NEXT: vpaddw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7]
-; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $3, %xmm3, %xmm4
+; AVX1-NEXT: vpsubw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpmulhw %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlw $15, %xmm2, %xmm3
; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_rem7_32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [65427,65427,65427,65427,65427,65427,65427,65427]
-; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
+; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
-; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
-; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $7, %xmm3, %xmm4
+; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4
-; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
-; AVX1-NEXT: vpxor %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpsubb %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpaddb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3
-; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpxor %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsubb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2
+; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4
-; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm1
-; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm3
+; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm9, %xmm1, %xmm1
-; AVX1-NEXT: vpxor %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpsubb %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpxor %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm7, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_32i8:
; AVX2NOBW-NEXT: vpsrlw $7, %ymm1, %ymm1
; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm3, %xmm3
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpsllw $3, %ymm1, %ymm2
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i8:
; AVX512BW-NEXT: vpsrlw $7, %ymm1, %ymm1
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsllw $3, %ymm1, %ymm2
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512BW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
%res = srem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <32 x i8> %res
; AVX512F-LABEL: test_rem7_64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
-; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm3
-; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427,65427]
+; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm4
-; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4
+; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
-; AVX512F-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
-; AVX512F-NEXT: vpaddb %ymm0, %ymm3, %ymm3
-; AVX512F-NEXT: vpsrlw $7, %ymm3, %ymm5
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpand %ymm4, %ymm5, %ymm7
-; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
-; AVX512F-NEXT: vpxor %ymm6, %ymm3, %ymm3
-; AVX512F-NEXT: vpsubb %ymm6, %ymm3, %ymm3
-; AVX512F-NEXT: vpaddb %ymm7, %ymm3, %ymm7
-; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm8
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512F-NEXT: vpmullw %ymm3, %ymm8, %ymm8
-; AVX512F-NEXT: vpmovsxwd %ymm8, %zmm8
-; AVX512F-NEXT: vpmovdb %zmm8, %xmm8
-; AVX512F-NEXT: vextracti128 $1, %ymm7, %xmm7
-; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm7
-; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm7
-; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm7
-; AVX512F-NEXT: vpmovdb %zmm7, %xmm7
-; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm8, %ymm7
-; AVX512F-NEXT: vpsubb %ymm7, %ymm0, %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm7
-; AVX512F-NEXT: vpmovsxbw %xmm7, %ymm7
-; AVX512F-NEXT: vpmullw %ymm2, %ymm7, %ymm7
-; AVX512F-NEXT: vpsrlw $8, %ymm7, %ymm7
-; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm8
-; AVX512F-NEXT: vpmullw %ymm2, %ymm8, %ymm2
-; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm7
-; AVX512F-NEXT: vpand %ymm4, %ymm7, %ymm4
+; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
-; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
-; AVX512F-NEXT: vpxor %ymm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpsubb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512F-NEXT: vpxor %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm4, %ymm2, %ymm2
-; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4
-; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4
-; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4
-; AVX512F-NEXT: vpmovdb %zmm4, %xmm4
-; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm2
+; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
+; AVX512F-NEXT: vpsubb %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
-; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm4
+; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
+; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm3
+; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
+; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpxor %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpsubb %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm3
+; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
+; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_64i8:
; AVX512BW-NEXT: vpsrlw $7, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1
-; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsllw $3, %zmm1, %zmm2
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%res = srem <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <64 x i8> %res
; SSE2-NEXT: psrld $1, %xmm1
; SSE2-NEXT: paddd %xmm2, %xmm1
; SSE2-NEXT: psrld $2, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [7,7,7,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pmuludq %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; SSE2-NEXT: psubd %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pslld $3, %xmm2
+; SSE2-NEXT: psubd %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_4i32:
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE2-NEXT: pmullw %xmm3, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: pmullw %xmm3, %xmm1
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm2, %xmm1
-; SSE2-NEXT: psubb %xmm1, %xmm0
+; SSE2-NEXT: psllw $3, %xmm2
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT: psubb %xmm2, %xmm1
+; SSE2-NEXT: paddb %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_rem7_16i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [37,37,37,37,37,37,37,37]
-; SSE41-NEXT: pmullw %xmm2, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37]
+; SSE41-NEXT: pmullw %xmm1, %xmm2
+; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; SSE41-NEXT: pmullw %xmm2, %xmm3
+; SSE41-NEXT: pmullw %xmm1, %xmm3
; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psubb %xmm1, %xmm2
-; SSE41-NEXT: psrlw $1, %xmm2
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: paddb %xmm1, %xmm2
-; SSE41-NEXT: psrlw $2, %xmm2
+; SSE41-NEXT: packuswb %xmm3, %xmm2
+; SSE41-NEXT: movdqa %xmm0, %xmm1
+; SSE41-NEXT: psubb %xmm2, %xmm1
+; SSE41-NEXT: psrlw $1, %xmm1
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT: paddb %xmm2, %xmm1
+; SSE41-NEXT: psrlw $2, %xmm1
+; SSE41-NEXT: pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psllw $3, %xmm2
; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; SSE41-NEXT: pmullw %xmm3, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm4, %xmm2
-; SSE41-NEXT: pmullw %xmm3, %xmm1
-; SSE41-NEXT: pand %xmm4, %xmm1
-; SSE41-NEXT: packuswb %xmm2, %xmm1
-; SSE41-NEXT: psubb %xmm1, %xmm0
+; SSE41-NEXT: psubb %xmm2, %xmm1
+; SSE41-NEXT: paddb %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: test_rem7_16i8:
; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $3, %xmm1, %xmm2
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
; AVX2NOBW-LABEL: test_rem7_16i8:
; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2NOBW-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX2NOBW-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX2NOBW-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpsllw $3, %xmm1, %xmm2
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2NOBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX2NOBW-NEXT: vzeroupper
; AVX2NOBW-NEXT: retq
;
; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vpsllw $3, %xmm1, %xmm2
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%res = urem <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
; AVX1-NEXT: vpaddd %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpsrld $2, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7]
-; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpslld $3, %xmm2, %xmm4
+; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrld $2, %xmm2, %xmm2
-; AVX1-NEXT: vpmulld %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpslld $3, %xmm2, %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7]
-; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $3, %xmm3, %xmm4
+; AVX1-NEXT: vpsubw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpmulhuw %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm4
; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX1-NEXT: vpmullw %xmm7, %xmm6, %xmm6
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpmullw %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm6, %xmm2, %xmm2
-; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm6
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vpsubb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmullw %xmm7, %xmm3, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpmullw %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpsubb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpsllw $3, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4
-; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2NOBW-NEXT: vpmullw %ymm3, %ymm1, %ymm1
-; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm3, %xmm3
-; AVX2NOBW-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX2NOBW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpsllw $3, %ymm1, %ymm2
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2NOBW-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_32i8:
; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsllw $3, %ymm1, %ymm2
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512BW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
%res = urem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <32 x i8> %res
; AVX512F-LABEL: test_rem7_64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
-; AVX512F-NEXT: vpmullw %ymm2, %ymm3, %ymm3
-; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4
+; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
-; AVX512F-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
-; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm4
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
+; AVX512F-NEXT: vpsubb %ymm2, %ymm0, %ymm4
; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm3, %ymm4, %ymm3
-; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3
+; AVX512F-NEXT: vpaddb %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm6
-; AVX512F-NEXT: vpmovsxbw %xmm6, %ymm7
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm7
-; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm7
-; AVX512F-NEXT: vpmovdb %zmm7, %xmm7
-; AVX512F-NEXT: vextracti128 $1, %ymm6, %xmm6
-; AVX512F-NEXT: vpmovsxbw %xmm6, %ymm6
-; AVX512F-NEXT: vpmullw %ymm3, %ymm6, %ymm6
-; AVX512F-NEXT: vpmovsxwd %ymm6, %zmm6
-; AVX512F-NEXT: vpmovdb %zmm6, %xmm6
-; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6
-; AVX512F-NEXT: vpsubb %ymm6, %ymm0, %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm6
-; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero,xmm6[8],zero,xmm6[9],zero,xmm6[10],zero,xmm6[11],zero,xmm6[12],zero,xmm6[13],zero,xmm6[14],zero,xmm6[15],zero
-; AVX512F-NEXT: vpmullw %ymm2, %ymm6, %ymm6
-; AVX512F-NEXT: vpsrlw $8, %ymm6, %ymm6
-; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512F-NEXT: vpmullw %ymm2, %ymm7, %ymm2
+; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm6
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
+; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpsubb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512F-NEXT: vpmullw %ymm3, %ymm6, %ymm3
+; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm6
-; AVX512F-NEXT: vpsrlw $1, %ymm6, %ymm6
-; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm5
-; AVX512F-NEXT: vpaddb %ymm2, %ymm5, %ymm2
+; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm3
+; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
-; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4
-; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4
-; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4
-; AVX512F-NEXT: vpmovdb %zmm4, %xmm4
-; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm2
-; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512F-NEXT: vpmovdb %zmm2, %xmm2
-; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT: vpsllw $3, %ymm2, %ymm3
+; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT: vpsubb %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: test_rem7_64i8:
; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1
; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm2
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm2, %zmm2
-; AVX512BW-NEXT: vpmovwb %zmm2, %ymm2
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; AVX512BW-NEXT: vpmovsxbw %ymm1, %zmm1
-; AVX512BW-NEXT: vpmullw %zmm3, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512BW-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsllw $3, %zmm1, %zmm2
+; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT: vpsubb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%res = urem <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7>
ret <64 x i8> %res
define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_17:
; X86: # %bb.0:
-; X86-NEXT: movdqa {{.*#+}} xmm1 = [17,0,17,0]
-; X86-NEXT: movdqa %xmm0, %xmm2
-; X86-NEXT: pmuludq %xmm1, %xmm2
-; X86-NEXT: psrlq $32, %xmm0
-; X86-NEXT: pmuludq %xmm1, %xmm0
-; X86-NEXT: psllq $32, %xmm0
-; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psllq $4, %xmm1
+; X86-NEXT: paddq %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_17:
; X64: # %bb.0:
-; X64-NEXT: movdqa {{.*#+}} xmm1 = [17,17]
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pmuludq %xmm1, %xmm2
-; X64-NEXT: psrlq $32, %xmm0
-; X64-NEXT: pmuludq %xmm1, %xmm0
-; X64-NEXT: psllq $32, %xmm0
-; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: psllq $4, %xmm1
+; X64-NEXT: paddq %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_17:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,17]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: vpsllq $4, %xmm0, %xmm1
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 17, i64 17>
ret <2 x i64> %1
define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_17:
; X86: # %bb.0:
-; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17]
-; X86-NEXT: pmullw %xmm2, %xmm0
-; X86-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X86-NEXT: pand %xmm3, %xmm0
-; X86-NEXT: pmullw %xmm2, %xmm1
-; X86-NEXT: pand %xmm3, %xmm1
-; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psllw $4, %xmm1
+; X86-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-NEXT: paddb %xmm0, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_17:
; X64: # %bb.0:
-; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17]
-; X64-NEXT: pmullw %xmm2, %xmm0
-; X64-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X64-NEXT: pand %xmm3, %xmm0
-; X64-NEXT: pmullw %xmm2, %xmm1
-; X64-NEXT: pand %xmm3, %xmm1
-; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: psllw $4, %xmm1
+; X64-NEXT: pand {{.*}}(%rip), %xmm1
+; X64-NEXT: paddb %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_17:
; X64-XOP: # %bb.0:
-; X64-XOP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17]
-; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; X64-XOP-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm1[0,2,4,6,8,10,12,14]
+; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm1
+; X64-XOP-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_17:
; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <16 x i8> %a0, <i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17>
ret <16 x i8> %1
define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_7:
; X86: # %bb.0:
-; X86-NEXT: movdqa {{.*#+}} xmm1 = [7,0,7,0]
-; X86-NEXT: movdqa %xmm0, %xmm2
-; X86-NEXT: pmuludq %xmm1, %xmm2
-; X86-NEXT: psrlq $32, %xmm0
-; X86-NEXT: pmuludq %xmm1, %xmm0
-; X86-NEXT: psllq $32, %xmm0
-; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psllq $3, %xmm1
+; X86-NEXT: psubq %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_7:
; X64: # %bb.0:
-; X64-NEXT: movdqa {{.*#+}} xmm1 = [7,7]
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pmuludq %xmm1, %xmm2
-; X64-NEXT: psrlq $32, %xmm0
-; X64-NEXT: pmuludq %xmm1, %xmm0
-; X64-NEXT: psllq $32, %xmm0
-; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: psllq $3, %xmm1
+; X64-NEXT: psubq %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
;
; X64-AVX-LABEL: mul_v2i64_7:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [7,7]
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
-; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
-; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
-; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: vpsllq $3, %xmm0, %xmm1
+; X64-AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm0
; X64-AVX-NEXT: retq
%1 = mul <2 x i64> %a0, <i64 7, i64 7>
ret <2 x i64> %1
define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
; X86-LABEL: mul_v16i8_31:
; X86: # %bb.0:
-; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; X86-NEXT: pmullw %xmm2, %xmm0
-; X86-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X86-NEXT: pand %xmm3, %xmm0
-; X86-NEXT: pmullw %xmm2, %xmm1
-; X86-NEXT: pand %xmm3, %xmm1
-; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psllw $5, %xmm1
+; X86-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-NEXT: psubb %xmm0, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v16i8_31:
; X64: # %bb.0:
-; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; X64-NEXT: pmullw %xmm2, %xmm0
-; X64-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X64-NEXT: pand %xmm3, %xmm0
-; X64-NEXT: pmullw %xmm2, %xmm1
-; X64-NEXT: pand %xmm3, %xmm1
-; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: psllw $5, %xmm1
+; X64-NEXT: pand {{.*}}(%rip), %xmm1
+; X64-NEXT: psubb %xmm0, %xmm1
; X64-NEXT: movdqa %xmm1, %xmm0
; X64-NEXT: retq
;
; X64-XOP-LABEL: mul_v16i8_31:
; X64-XOP: # %bb.0:
-; X64-XOP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
-; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
-; X64-XOP-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; X64-XOP-NEXT: vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm1[0,2,4,6,8,10,12,14]
+; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm1
+; X64-XOP-NEXT: vpsubb %xmm0, %xmm1, %xmm0
; X64-XOP-NEXT: retq
;
; X64-AVX2-LABEL: mul_v16i8_31:
; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
-; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: vpsllw $5, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX2-NEXT: vpsubb %xmm0, %xmm1, %xmm0
; X64-AVX2-NEXT: retq
%1 = mul <16 x i8> %a0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
ret <16 x i8> %1