If we have BWI (AVX-512BW), we can truncate in a much simpler way by using vpmovwb. This even works without VLX, by using the wider zmm->ymm truncate followed by a subvector extract.
Differential Revision: https://reviews.llvm.org/D38375
llvm-svn: 314457
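
For readers unfamiliar with the instruction: vpmovwb is the AVX-512BW word-to-byte truncate, and its zmm form narrows 32 x i16 to 32 x i8 with no cross-lane fixups. A minimal sketch of the no-VLX trick the message describes, written with compiler intrinsics (the helper name is ours, not the patch's):

#include <immintrin.h>

// Hypothetical helper (not part of the patch): truncate 16 x i16 in a ymm
// to 16 x i8 without AVX-512VL by widening to zmm, using the zmm->ymm
// vpmovwb, and extracting the low 128-bit subvector.
__m128i trunc_v16i16_to_v16i8(__m256i X) {
  __m512i Wide = _mm512_castsi256_si512(X);    // upper 256 bits are don't-care
  __m256i Narrow = _mm512_cvtepi16_epi8(Wide); // vpmovwb zmm -> ymm
  return _mm256_castsi256_si128(Narrow);       // keep the meaningful low half
}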
    SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v32i16, ExA, ExB);
    Mul = DAG.getNode(ISD::SRL, dl, MVT::v32i16, Mul,
                      DAG.getConstant(8, dl, MVT::v32i16));
-    // The ymm variant of PACKUS treats the 128-bit lanes separately, so
-    // before using PACKUS we need to permute the inputs to the correct
-    // lo/hi xmm lane.
-    const int Mask[] = { 0,  1,  2,  3,  4,  5,  6,  7,
-                        16, 17, 18, 19, 20, 21, 22, 23,
-                         8,  9, 10, 11, 12, 13, 14, 15,
-                        24, 25, 26, 27, 28, 29, 30, 31};
-    Mul = DAG.getVectorShuffle(MVT::v32i16, dl, Mul, Mul, Mask);
-    Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i16, Mul, Lo);
-    Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i16, Mul, Hi);
-    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
+    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
  }
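
What this lowering computes, per byte lane, is a multiply-high: x86 has no 8-bit PMULHW equivalent, so the bytes are extended to words, multiplied, shifted, and narrowed again. A scalar model (illustrative only, not code from the patch):

#include <cstdint>

// Per-lane model of the DAG sequence above: extend to 16 bits, multiply,
// shift right by 8, truncate back. ExAVX selects sign or zero extension.
uint8_t mulhu8(uint8_t A, uint8_t B) {
  return uint8_t((uint16_t(A) * uint16_t(B)) >> 8);
}
int8_t mulhs8(int8_t A, int8_t B) {
  return int8_t((int16_t(A) * int16_t(B)) >> 8);
}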
    SDValue ALo = extract128BitVector(A, 0, DAG, dl);
    SDValue BLo = extract128BitVector(B, 0, DAG, dl);
    SDValue ExA = DAG.getNode(ExAVX, dl, MVT::v16i16, A);
    SDValue ExB = DAG.getNode(ExAVX, dl, MVT::v16i16, B);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB);
-    SDValue MulH = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
-                               DAG.getConstant(8, dl, MVT::v16i16));
-    Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Lo);
-    Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Hi);
+    Mul = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
+                      DAG.getConstant(8, dl, MVT::v16i16));
+    // If we have BWI, we can use a truncate instruction.
+    if (Subtarget.hasBWI())
+      return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
+    Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, Mul, Lo);
+    Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, Mul, Hi);
    return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
  }
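
On targets without BWI, the PACKUS fallback still runs; it is correct here because after the logical shift by 8 every word is in [0, 255], so the pack's unsigned saturation never clamps. A sketch of that fallback with intrinsics (names are ours):

#include <immintrin.h>

// Non-BWI fallback, modeled with intrinsics: split the shifted v16i16
// product into two v8i16 halves and repack them to bytes. Saturation in
// vpackuswb is a no-op because every word already fits in a byte.
__m128i pack_high_bytes(__m256i Mul) {
  __m128i Lo = _mm256_castsi256_si128(Mul);      // words 0..7
  __m128i Hi = _mm256_extracti128_si256(Mul, 1); // words 8..15
  return _mm_packus_epi16(Lo, Hi);               // 16 x i8, in order
}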
; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: test_div7_16i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
-; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpsubb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; AVX2NOBW-LABEL: test_div7_16i8:
+; AVX2NOBW: # BB#0:
+; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2NOBW-NEXT: vpsrlw $2, %xmm0, %xmm1
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX2NOBW-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX2NOBW-NEXT: vzeroupper
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_div7_16i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm1
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpsrlw $2, %xmm0, %xmm1
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpsrlw $7, %xmm0, %xmm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
  %res = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
ret <16 x i8> %res
}
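
The sdiv checks above follow the usual signed magic-number expansion. Below is a scalar model that can be checked exhaustively; the multiplier -109 is the standard 8-bit signed magic for 7 and is an assumption on our part, since the constant pool entry is hidden behind the {{.*}}(%rip) memory operand in the CHECK lines:

#include <cassert>
#include <cstdint>

// Scalar model of the test_div7_16i8 sequence (signed); -109 is assumed.
int8_t sdiv7(int8_t X) {
  int8_t MulH = int8_t((int16_t(X) * -109) >> 8); // vpmullw + vpsrlw $8
  int8_t T = int8_t(MulH + X);                    // vpaddb
  // Arithmetic >>2 built from a logical shift plus the xor/sub-by-32
  // sign-extension trick visible above (vpsrlw/vpand/vpxor/vpsubb).
  int8_t Q = int8_t(((uint8_t(T) >> 2) ^ 32) - 32);
  Q += uint8_t(T) >> 7;                           // vpsrlw $7 + vpand
  return Q;
}

int main() {
  for (int X = -128; X <= 127; ++X)
    assert(sdiv7(int8_t(X)) == int8_t(X) / 7);
}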
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpaddb %xmm0, %xmm1, %xmm1
; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm2
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpsrlw $2, %ymm0, %ymm1
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm2
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
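
For the 32-byte cases, the removed vshufi64x2/vextracti64x4/vpackuswb triple existed because PACKUS on ymm/zmm operates within each 128-bit lane and so needed a cross-lane permute first; vpmovwb crosses lanes by itself. Both forms, modeled with intrinsics (equivalent here only because every word is already in [0, 255]):

#include <immintrin.h>

// Before the patch: permute 128-bit lanes to [0,2,1,3], split, then pack.
__m256i trunc32x16_old(__m512i W) {
  __m512i P = _mm512_shuffle_i64x2(W, W, 0xD8); // zmm[0,1,4,5,2,3,6,7]
  __m256i Lo = _mm512_castsi512_si256(P);
  __m256i Hi = _mm512_extracti64x4_epi64(P, 1);
  return _mm256_packus_epi16(Lo, Hi);
}

// After the patch: one lane-crossing truncate.
__m256i trunc32x16_new(__m512i W) {
  return _mm512_cvtepi16_epi8(W);               // vpmovwb zmm -> ymm
}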
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: test_div7_16i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; AVX2NOBW-LABEL: test_div7_16i8:
+; AVX2NOBW: # BB#0:
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2NOBW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2NOBW-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2NOBW-NEXT: vzeroupper
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_div7_16i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX512BW-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
  %res = udiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
ret <16 x i8> %res
}
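
The unsigned version uses the subtract/shift/add fixup so the magic fits in 8 bits. Again a scalar model; the multiplier 37 (= ceil(2^11/7) - 2^8) is our assumption, read off the standard algorithm rather than the hidden constant pool:

#include <cassert>
#include <cstdint>

// Scalar model of the test_div7_16i8 sequence (unsigned); magic 37 assumed.
uint8_t udiv7(uint8_t X) {
  uint8_t H = uint8_t((uint16_t(X) * 37u) >> 8); // vpmullw + vpsrlw $8
  uint8_t T = uint8_t((X - H) >> 1);             // vpsubb + vpsrlw $1 + vpand
  return uint8_t((T + H) >> 2);                  // vpaddb + vpsrlw $2 + vpand
}

int main() {
  for (int X = 0; X <= 255; ++X)
    assert(udiv7(uint8_t(X)) == X / 7);
}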
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpsubb %xmm1, %xmm0, %xmm2
; AVX512BW-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpsrlw $1, %ymm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
-; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
-; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm2
; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
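
Several of the fragments above keep the original operand live in a register, which is consistent with the rem tests in these files: the quotient is computed exactly as in the div tests, and the remainder is then finished with a multiply-and-subtract. A sketch, reusing the udiv7 model from earlier (the multiply-subtract tail itself is outside the fragments shown):

#include <cstdint>

uint8_t udiv7(uint8_t); // scalar model defined earlier

// Remainder via the quotient: r = x - 7*q (the usual expansion).
uint8_t urem7(uint8_t X) {
  return uint8_t(X - uint8_t(udiv7(X) * 7));
}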