From 83e1a1e79b51f54700b304a230d10df0b5c8d8e6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev@redking.me.uk>
Date: Thu, 27 Jun 2019 14:25:54 +0000
Subject: [PATCH] [TargetLowering] SimplifyDemandedVectorElts - add
 shift/rotate support.
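
SimplifyDemandedVectorElts can now look through vector shifts and rotates,
simplifying both the shifted value and the shift/rotate amount against the
demanded elements and combining their known undef lanes. As a rough sketch
of the kind of pattern this helps (hypothetical IR, not taken from the
tests below) - if only lanes 0 and 1 of a shift are demanded:

  %s = shl <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  %r = shufflevector <4 x i32> %s, <4 x i32> undef,
                     <4 x i32> <i32 0, i32 1, i32 0, i32 1>

then lanes 2 and 3 of %x and of the shift amount are no longer demanded,
which is why several constant-pool multipliers and shift amounts in the
checked asm below gain undef ('u') lanes or get folded.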

llvm-svn: 364548
---
 llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp |  18 +
 llvm/test/CodeGen/X86/combine-sdiv.ll            | 478 +++++++++++------------
 llvm/test/CodeGen/X86/combine-udiv.ll            |  46 +--
 llvm/test/CodeGen/X86/known-signbits-vector.ll   |  48 ++-
 llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll |   5 +-
 llvm/test/CodeGen/X86/vector-fshl-128.ll         |  55 ++-
 llvm/test/CodeGen/X86/vector-fshl-256.ll         |  50 +--
 llvm/test/CodeGen/X86/vector-fshl-512.ll         |  12 +-
 llvm/test/CodeGen/X86/vector-fshr-128.ll         | 170 ++++----
 llvm/test/CodeGen/X86/vector-fshr-256.ll         |  45 +--
 llvm/test/CodeGen/X86/vector-fshr-512.ll         |  20 +-
 11 files changed, 472 insertions(+), 475 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 50cd8cd..0acac2b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -2232,6 +2232,24 @@ bool TargetLowering::SimplifyDemandedVectorElts(
     KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS);
     break;
   }
+  case ISD::SHL:
+  case ISD::SRL:
+  case ISD::SRA:
+  case ISD::ROTL:
+  case ISD::ROTR: {
+    APInt UndefRHS, ZeroRHS;
+    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, UndefRHS,
+                                   ZeroRHS, TLO, Depth + 1))
+      return true;
+    APInt UndefLHS, ZeroLHS;
+    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UndefLHS,
+                                   ZeroLHS, TLO, Depth + 1))
+      return true;
+
+    KnownZero = ZeroLHS;
+    KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop?
+    break;
+  }
   case ISD::MUL:
   case ISD::AND: {
     APInt SrcUndef, SrcZero;
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 3d785692..39ce0c9 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -295,7 +295,7 @@ define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
 ; SSE2-NEXT:    pcmpgtb %xmm0, %xmm2
 ; SSE2-NEXT:    movdqa %xmm2, %xmm3
 ; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [1,4,2,16,8,32,64,2]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [256,4,2,16,8,32,64,2]
 ; SSE2-NEXT:    pmullw %xmm4, %xmm3
 ; SSE2-NEXT:    psrlw $8, %xmm3
 ; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
@@ -329,7 +329,7 @@ define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
 ; SSE41-NEXT:    pcmpgtb %xmm1, %xmm3
 ; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
 ; SSE41-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [1,4,2,16,8,32,64,2]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [256,4,2,16,8,32,64,2]
 ; SSE41-NEXT:    pmullw %xmm0, %xmm3
 ; SSE41-NEXT:    psrlw $8, %xmm3
 ; SSE41-NEXT:    pmullw %xmm0, %xmm2
@@ -357,7 +357,7 @@ define <16 x i8> @combine_vec_sdiv_by_pow2b_v16i8(<16 x i8> %x) {
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
 ; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,4,2,16,8,32,64,2]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [256,4,2,16,8,32,64,2]
 ; AVX1-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
@@ -553,7 +553,7 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    movdqa %xmm0, %xmm3
 ; SSE2-NEXT:    psraw $15, %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = [1,4,2,16,8,32,64,2]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm8 = <u,4,2,16,8,32,64,2>
 ; SSE2-NEXT:    pmulhuw %xmm8, %xmm0
 ; SSE2-NEXT:    paddw %xmm3, %xmm0
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,65535,0,0,65535]
@@ -611,7 +611,7 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movdqa %xmm0, %xmm2
 ; SSE41-NEXT:    psraw $15, %xmm2
-; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,4,2,16,8,32,64,2]
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = <u,4,2,16,8,32,64,2>
 ; SSE41-NEXT:    pmulhuw %xmm4, %xmm2
 ; SSE41-NEXT:    paddw %xmm0, %xmm2
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm5 =
@@ -636,19 +636,17 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT:    vpsraw $15, %xmm1, %xmm2
-; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,4,2,16,8,32,64,2]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,4,2,16,8,32,64,2>
 ; AVX1-NEXT:    vpmulhuw %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 =
 ; AVX1-NEXT:    vpmulhw %xmm2, %xmm1, %xmm4
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpsraw $1, %xmm1, %xmm1
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1],xmm1[2],xmm4[3,4,5,6],xmm1[7]
 ; AVX1-NEXT:    vpsraw $15, %xmm0, %xmm4
 ; AVX1-NEXT:    vpmulhuw %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT:    vpaddw %xmm3, %xmm0, %xmm3
 ; AVX1-NEXT:    vpmulhw %xmm2, %xmm3, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4,5,6,7]
 ; AVX1-NEXT:    vpsraw $1, %xmm3, %xmm3
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5,6],xmm3[7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
@@ -694,10 +692,10 @@ define <16 x i16> @combine_vec_sdiv_by_pow2b_v16i16(<16 x i16> %x) {
 ; XOP:       # %bb.0:
 ; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; XOP-NEXT:    vpsraw $15, %xmm1, %xmm2
-; XOP-NEXT:    vmovdqa {{.*#+}} xmm3 = [65520,65522,65521,65524,65523,65525,65526,65521]
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,65522,65521,65524,65523,65525,65526,65521>
 ; XOP-NEXT:    vpshlw %xmm3, %xmm2, %xmm2
 ; XOP-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
-; XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,65534,65535,65532,65533,65531,65530,65535]
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,65534,65535,65532,65533,65531,65530,65535>
 ; XOP-NEXT:    vpshaw %xmm2, %xmm1, %xmm1
 ; XOP-NEXT:    vpsraw $15, %xmm0, %xmm4
 ; XOP-NEXT:    vpshlw %xmm3, %xmm4, %xmm3
@@ -718,7 +716,7 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
 ; SSE2-NEXT:    movdqa %xmm1, %xmm8
 ; SSE2-NEXT:    movdqa %xmm0, %xmm1
 ; SSE2-NEXT:    psraw $15, %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = [1,4,2,16,8,32,64,2]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm9 = <u,4,2,16,8,32,64,2>
 ; SSE2-NEXT:    pmulhuw %xmm9, %xmm0
 ; SSE2-NEXT:    paddw %xmm1, %xmm0
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm11 = [65535,65535,65535,0,65535,0,0,65535]
@@ -830,7 +828,7 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) {
 ; SSE41-NEXT:    movdqa %xmm1, %xmm4
 ; SSE41-NEXT:    movdqa %xmm0, %xmm1
 ; SSE41-NEXT:    psraw $15,
%xmm0 -; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [1,4,2,16,8,32,64,2] +; SSE41-NEXT: movdqa {{.*#+}} xmm7 = ; SSE41-NEXT: pmulhuw %xmm7, %xmm0 ; SSE41-NEXT: paddw %xmm1, %xmm0 ; SSE41-NEXT: movdqa {{.*#+}} xmm6 = @@ -873,19 +871,17 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) { ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpsraw $15, %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,4,2,16,8,32,64,2] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = ; AVX1-NEXT: vpmulhuw %xmm4, %xmm3, %xmm3 ; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = ; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm5 -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm2[0],xmm5[1,2,3,4,5,6,7] ; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3,4,5,6],xmm2[7] ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm5 ; AVX1-NEXT: vpmulhuw %xmm4, %xmm5, %xmm5 ; AVX1-NEXT: vpaddw %xmm5, %xmm0, %xmm5 ; AVX1-NEXT: vpmulhw %xmm3, %xmm5, %xmm6 -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm5[0],xmm6[1,2,3,4,5,6,7] ; AVX1-NEXT: vpsraw $1, %xmm5, %xmm5 ; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4,5,6],xmm5[7] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 @@ -899,14 +895,12 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) { ; AVX1-NEXT: vpmulhuw %xmm4, %xmm6, %xmm6 ; AVX1-NEXT: vpaddw %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm6 -; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm6[1,2,3,4,5,6,7] ; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1],xmm2[2],xmm6[3,4,5,6],xmm2[7] ; AVX1-NEXT: vpsraw $15, %xmm1, %xmm6 ; AVX1-NEXT: vpmulhuw %xmm4, %xmm6, %xmm4 ; AVX1-NEXT: vpaddw %xmm4, %xmm1, %xmm4 ; AVX1-NEXT: vpmulhw %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3,4,5,6,7] ; AVX1-NEXT: vpsraw $1, %xmm4, %xmm4 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5,6],xmm4[7] ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 @@ -918,7 +912,7 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) { ; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v32i16: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsraw $15, %ymm0, %ymm2 -; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,4,2,16,8,32,64,2,1,4,2,16,8,32,64,2] +; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,4,2,16,8,32,64,2,0,4,2,16,8,32,64,2] ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] ; AVX2-NEXT: vpmulhuw %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm2 @@ -940,7 +934,7 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) { ; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v32i16: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsraw $15, %ymm0, %ymm2 -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,4,2,16,8,32,64,2,1,4,2,16,8,32,64,2] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,4,2,16,8,32,64,2,0,4,2,16,8,32,64,2] ; AVX512F-NEXT: # ymm3 = mem[0,1,0,1] ; AVX512F-NEXT: vpmulhuw %ymm3, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddw %ymm2, %ymm0, %ymm2 @@ -975,10 +969,10 @@ define <32 x i16> @combine_vec_sdiv_by_pow2b_v32i16(<32 x i16> %x) { ; XOP: # %bb.0: ; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2 ; XOP-NEXT: vpsraw $15, %xmm2, %xmm3 -; XOP-NEXT: vmovdqa {{.*#+}} xmm4 = [65520,65522,65521,65524,65523,65525,65526,65521] +; XOP-NEXT: vmovdqa {{.*#+}} xmm4 = ; XOP-NEXT: vpshlw %xmm4, %xmm3, %xmm3 ; XOP-NEXT: vpaddw %xmm3, %xmm2, %xmm2 -; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,65534,65535,65532,65533,65531,65530,65535] +; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOP-NEXT: vpshaw %xmm3, %xmm2, %xmm2 ; 
XOP-NEXT: vpsraw $15, %xmm0, %xmm5 ; XOP-NEXT: vpshlw %xmm4, %xmm5, %xmm5 @@ -1185,8 +1179,7 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) { ; AVX1-NEXT: vpsrad $4, %xmm1, %xmm2 ; AVX1-NEXT: vpsrad $2, %xmm1, %xmm3 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7] -; AVX1-NEXT: vpsrad $3, %xmm1, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpsrad $3, %xmm1, %xmm1 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2 ; AVX1-NEXT: vpsrld $28, %xmm2, %xmm3 @@ -1217,10 +1210,10 @@ define <8 x i32> @combine_vec_sdiv_by_pow2b_v8i32(<8 x i32> %x) { ; XOP: # %bb.0: ; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1 ; XOP-NEXT: vpsrad $31, %xmm1, %xmm2 -; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [4294967264,4294967266,4294967267,4294967268] +; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOP-NEXT: vpshld %xmm3, %xmm2, %xmm2 ; XOP-NEXT: vpaddd %xmm2, %xmm1, %xmm1 -; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,4294967294,4294967293,4294967292] +; XOP-NEXT: vmovdqa {{.*#+}} xmm2 = ; XOP-NEXT: vpshad %xmm2, %xmm1, %xmm1 ; XOP-NEXT: vpsrad $31, %xmm0, %xmm4 ; XOP-NEXT: vpshld %xmm3, %xmm4, %xmm3 @@ -1405,8 +1398,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) { ; AVX1-NEXT: vpsrad $4, %xmm2, %xmm3 ; AVX1-NEXT: vpsrad $2, %xmm2, %xmm4 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] -; AVX1-NEXT: vpsrad $3, %xmm2, %xmm4 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7] +; AVX1-NEXT: vpsrad $3, %xmm2, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] ; AVX1-NEXT: vpsrad $31, %xmm0, %xmm3 ; AVX1-NEXT: vpsrld $28, %xmm3, %xmm4 @@ -1433,8 +1425,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) { ; AVX1-NEXT: vpsrad $4, %xmm2, %xmm3 ; AVX1-NEXT: vpsrad $2, %xmm2, %xmm4 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7] -; AVX1-NEXT: vpsrad $3, %xmm2, %xmm4 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7] +; AVX1-NEXT: vpsrad $3, %xmm2, %xmm2 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] ; AVX1-NEXT: vpsrad $31, %xmm1, %xmm3 ; AVX1-NEXT: vpsrld $28, %xmm3, %xmm4 @@ -1455,7 +1446,7 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) { ; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v16i32: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsrad $31, %ymm0, %ymm2 -; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [32,30,29,28,32,30,29,28] +; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,30,29,28,0,30,29,28] ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] ; AVX2-NEXT: vpsrlvd %ymm3, %ymm2, %ymm2 ; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm2 @@ -1498,10 +1489,10 @@ define <16 x i32> @combine_vec_sdiv_by_pow2b_v16i32(<16 x i32> %x) { ; XOP: # %bb.0: ; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2 ; XOP-NEXT: vpsrad $31, %xmm2, %xmm3 -; XOP-NEXT: vmovdqa {{.*#+}} xmm4 = [4294967264,4294967266,4294967267,4294967268] +; XOP-NEXT: vmovdqa {{.*#+}} xmm4 = ; XOP-NEXT: vpshld %xmm4, %xmm3, %xmm3 ; XOP-NEXT: vpaddd %xmm3, %xmm2, %xmm2 -; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,4294967294,4294967293,4294967292] +; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOP-NEXT: vpshad %xmm3, %xmm2, %xmm2 ; XOP-NEXT: vpsrad $31, %xmm0, %xmm5 ; XOP-NEXT: vpshld %xmm4, %xmm5, %xmm5 @@ -1532,10 +1523,12 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) { ; SSE2-NEXT: psrad $31, %xmm1 ; SSE2-NEXT: psrlq $62, %xmm1 ; SSE2-NEXT: paddq %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; 
SSE2-NEXT: psrad $2, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3] ; SSE2-NEXT: psrlq $2, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952] -; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: psubq %xmm2, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] ; SSE2-NEXT: movapd %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -1546,10 +1539,10 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) { ; SSE41-NEXT: psrad $31, %xmm1 ; SSE41-NEXT: psrlq $62, %xmm1 ; SSE41-NEXT: paddq %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psrad $2, %xmm2 ; SSE41-NEXT: psrlq $2, %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952] -; SSE41-NEXT: pxor %xmm2, %xmm1 -; SSE41-NEXT: psubq %xmm2, %xmm1 +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -1560,10 +1553,9 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) { ; AVX1-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1 ; AVX1-NEXT: vpsrlq $62, %xmm1, %xmm1 ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm1 +; AVX1-NEXT: vpsrad $2, %xmm1, %xmm2 ; AVX1-NEXT: vpsrlq $2, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952] -; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vpsubq %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7] ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; AVX1-NEXT: retq ; @@ -1571,23 +1563,21 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) { ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm1 -; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vpsrlq $62, %xmm1, %xmm1 ; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm1 -; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm1, %xmm1 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952] -; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1 -; AVX2-NEXT: vpsubq %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpsrad $2, %xmm1, %xmm2 +; AVX2-NEXT: vpsrlq $2, %xmm1, %xmm1 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3] ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; AVX2-NEXT: retq ; ; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v2i64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0] -; AVX512F-NEXT: vpsraq $63, %zmm0, %zmm2 -; AVX512F-NEXT: vpsrlvq {{.*}}(%rip), %xmm2, %xmm2 -; AVX512F-NEXT: vpaddq %xmm2, %xmm0, %xmm2 -; AVX512F-NEXT: vpsravq %zmm1, %zmm2, %zmm1 +; AVX512F-NEXT: vpsraq $63, %zmm0, %zmm1 +; AVX512F-NEXT: vpsrlq $62, %xmm1, %xmm1 +; AVX512F-NEXT: vpaddq %xmm1, %xmm0, %xmm1 +; AVX512F-NEXT: vpsraq $2, %zmm1, %zmm1 ; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq @@ -1595,16 +1585,16 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) { ; AVX512BW-LABEL: combine_vec_sdiv_by_pow2b_v2i64: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsraq $63, %xmm0, %xmm1 -; AVX512BW-NEXT: vpsrlvq {{.*}}(%rip), %xmm1, %xmm1 +; AVX512BW-NEXT: vpsrlq $62, %xmm1, %xmm1 ; AVX512BW-NEXT: vpaddq %xmm1, %xmm0, %xmm1 -; AVX512BW-NEXT: vpsravq {{.*}}(%rip), %xmm1, %xmm1 +; AVX512BW-NEXT: vpsraq $2, 
%xmm1, %xmm1 ; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; AVX512BW-NEXT: retq ; ; XOP-LABEL: combine_vec_sdiv_by_pow2b_v2i64: ; XOP: # %bb.0: ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm0, %xmm1 -; XOP-NEXT: vpshlq {{.*}}(%rip), %xmm1, %xmm1 +; XOP-NEXT: vpsrlq $62, %xmm1, %xmm1 ; XOP-NEXT: vpaddq %xmm1, %xmm0, %xmm1 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm1, %xmm1 ; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] @@ -1616,60 +1606,63 @@ define <2 x i64> @combine_vec_sdiv_by_pow2b_v2i64(<2 x i64> %x) { define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) { ; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v4i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: psrlq $61, %xmm3 -; SSE2-NEXT: psrlq $60, %xmm1 -; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1] -; SSE2-NEXT: paddq %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psrlq $3, %xmm2 -; SSE2-NEXT: psrlq $4, %xmm1 -; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] -; SSE2-NEXT: movapd {{.*#+}} xmm2 = [1152921504606846976,576460752303423488] -; SSE2-NEXT: xorpd %xmm2, %xmm1 -; SSE2-NEXT: psubq %xmm2, %xmm1 ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: psrad $31, %xmm2 ; SSE2-NEXT: psrlq $62, %xmm2 ; SSE2-NEXT: paddq %xmm0, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psrad $2, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3] ; SSE2-NEXT: psrlq $2, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952] -; SSE2-NEXT: pxor %xmm3, %xmm2 -; SSE2-NEXT: psubq %xmm3, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1] +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] +; SSE2-NEXT: movdqa %xmm3, %xmm0 +; SSE2-NEXT: psrlq $61, %xmm0 +; SSE2-NEXT: psrlq $60, %xmm3 +; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1] +; SSE2-NEXT: paddq %xmm1, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 +; SSE2-NEXT: psrlq $3, %xmm0 +; SSE2-NEXT: psrlq $4, %xmm3 +; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1] +; SSE2-NEXT: movapd {{.*#+}} xmm0 = [1152921504606846976,576460752303423488] +; SSE2-NEXT: xorpd %xmm0, %xmm3 +; SSE2-NEXT: psubq %xmm0, %xmm3 ; SSE2-NEXT: movapd %xmm2, %xmm0 +; SSE2-NEXT: movdqa %xmm3, %xmm1 ; SSE2-NEXT: retq ; ; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v4i64: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psrad $31, %xmm1 -; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: psrlq $60, %xmm3 -; SSE41-NEXT: psrlq $61, %xmm1 -; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] -; SSE41-NEXT: paddq %xmm2, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: psrlq $4, %xmm2 -; SSE41-NEXT: psrlq $3, %xmm1 -; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7] -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1152921504606846976,576460752303423488] -; SSE41-NEXT: pxor %xmm2, %xmm1 -; SSE41-NEXT: psubq %xmm2, %xmm1 ; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: psrlq $62, %xmm0 +; SSE41-NEXT: paddq %xmm2, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: psrad $2, %xmm3 +; SSE41-NEXT: psrlq $2, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,3],xmm0[4,5],xmm3[6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = 
xmm2[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: psrad $31, %xmm2 -; SSE41-NEXT: psrlq $62, %xmm2 -; SSE41-NEXT: paddq %xmm0, %xmm2 -; SSE41-NEXT: psrlq $2, %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952] -; SSE41-NEXT: pxor %xmm3, %xmm2 -; SSE41-NEXT: psubq %xmm3, %xmm2 -; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7] -; SSE41-NEXT: movdqa %xmm2, %xmm0 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: psrlq $60, %xmm3 +; SSE41-NEXT: psrlq $61, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: paddq %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrlq $4, %xmm1 +; SSE41-NEXT: psrlq $3, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7] +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1152921504606846976,576460752303423488] +; SSE41-NEXT: pxor %xmm1, %xmm2 +; SSE41-NEXT: psubq %xmm1, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 ; SSE41-NEXT: retq ; ; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v4i64: @@ -1690,10 +1683,9 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) { ; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlq $62, %xmm2, %xmm2 ; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm2 +; AVX1-NEXT: vpsrad $2, %xmm2, %xmm3 ; AVX1-NEXT: vpsrlq $2, %xmm2, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372036854775808,2305843009213693952] -; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7] ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7] ; AVX1-NEXT: retq @@ -1705,7 +1697,7 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) { ; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm1, %ymm1 ; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm1 ; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm1, %ymm1 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [9223372036854775808,2305843009213693952,1152921504606846976,576460752303423488] +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpsubq %ymm2, %ymm1, %ymm1 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7] @@ -1714,7 +1706,7 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) { ; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v4i64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = [0,2,3,4] +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm1 = ; AVX512F-NEXT: vpsraq $63, %zmm0, %zmm2 ; AVX512F-NEXT: vpsrlvq {{.*}}(%rip), %ymm2, %ymm2 ; AVX512F-NEXT: vpaddq %ymm2, %ymm0, %ymm2 @@ -1735,7 +1727,7 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) { ; XOP: # %bb.0: ; XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551553,18446744073709551553] ; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm2 -; XOP-NEXT: vpshlq {{.*}}(%rip), %xmm2, %xmm2 +; XOP-NEXT: vpsrlq $62, %xmm2, %xmm2 ; XOP-NEXT: vpaddq %xmm2, %xmm0, %xmm2 ; XOP-NEXT: vpshaq {{.*}}(%rip), %xmm2, %xmm2 ; XOP-NEXT: vextractf128 $1, %ymm0, %xmm3 @@ -1753,108 +1745,114 @@ define <4 x i64> @combine_vec_sdiv_by_pow2b_v4i64(<4 x i64> %x) { define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) { ; SSE2-LABEL: combine_vec_sdiv_by_pow2b_v8i64: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: psrad $31, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; SSE2-NEXT: 
movdqa %xmm1, %xmm5 -; SSE2-NEXT: psrlq $61, %xmm5 -; SSE2-NEXT: psrlq $60, %xmm1 -; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm5[0],xmm1[1] -; SSE2-NEXT: paddq %xmm3, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm3 -; SSE2-NEXT: psrlq $3, %xmm3 -; SSE2-NEXT: psrlq $4, %xmm1 -; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1] -; SSE2-NEXT: movapd {{.*#+}} xmm5 = [1152921504606846976,576460752303423488] -; SSE2-NEXT: xorpd %xmm5, %xmm1 -; SSE2-NEXT: psubq %xmm5, %xmm1 -; SSE2-NEXT: movdqa %xmm4, %xmm3 -; SSE2-NEXT: psrad $31, %xmm3 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] -; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: psrlq $61, %xmm6 -; SSE2-NEXT: psrlq $60, %xmm3 -; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm6[0],xmm3[1] -; SSE2-NEXT: paddq %xmm4, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: psrlq $3, %xmm4 -; SSE2-NEXT: psrlq $4, %xmm3 -; SSE2-NEXT: movsd {{.*#+}} xmm3 = xmm4[0],xmm3[1] -; SSE2-NEXT: xorpd %xmm5, %xmm3 -; SSE2-NEXT: psubq %xmm5, %xmm3 -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: psrad $31, %xmm4 -; SSE2-NEXT: psrlq $62, %xmm4 -; SSE2-NEXT: paddq %xmm0, %xmm4 -; SSE2-NEXT: psrlq $2, %xmm4 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [9223372036854775808,2305843009213693952] -; SSE2-NEXT: pxor %xmm6, %xmm4 -; SSE2-NEXT: psubq %xmm6, %xmm4 -; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm0[0],xmm4[1] +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrad $31, %xmm0 +; SSE2-NEXT: psrlq $62, %xmm0 +; SSE2-NEXT: paddq %xmm2, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm5 +; SSE2-NEXT: psrad $2, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3] +; SSE2-NEXT: psrlq $2, %xmm0 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] +; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1] +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: psrad $31, %xmm2 +; SSE2-NEXT: psrlq $62, %xmm2 +; SSE2-NEXT: paddq %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm5 +; SSE2-NEXT: psrad $2, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,3,2,3] +; SSE2-NEXT: psrlq $2, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] +; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1] +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: psrad $31, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: psrlq $61, %xmm5 +; SSE2-NEXT: psrlq $60, %xmm4 +; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm5[0],xmm4[1] +; SSE2-NEXT: paddq %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm1 +; SSE2-NEXT: psrlq $3, %xmm1 +; SSE2-NEXT: psrlq $4, %xmm4 +; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm1[0],xmm4[1] +; SSE2-NEXT: movapd {{.*#+}} xmm1 = [1152921504606846976,576460752303423488] +; SSE2-NEXT: xorpd %xmm1, %xmm4 +; SSE2-NEXT: psubq %xmm1, %xmm4 +; SSE2-NEXT: movdqa %xmm3, %xmm5 ; SSE2-NEXT: psrad $31, %xmm5 -; SSE2-NEXT: psrlq $62, %xmm5 -; SSE2-NEXT: paddq %xmm2, %xmm5 -; SSE2-NEXT: psrlq $2, %xmm5 -; SSE2-NEXT: pxor %xmm6, %xmm5 -; SSE2-NEXT: psubq %xmm6, %xmm5 -; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1] -; SSE2-NEXT: movapd %xmm4, %xmm0 -; SSE2-NEXT: movapd %xmm5, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; SSE2-NEXT: movdqa %xmm5, %xmm6 +; SSE2-NEXT: psrlq $61, %xmm6 +; SSE2-NEXT: psrlq $60, %xmm5 +; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm6[0],xmm5[1] +; SSE2-NEXT: paddq %xmm3, %xmm5 +; SSE2-NEXT: movdqa %xmm5, %xmm3 +; SSE2-NEXT: psrlq $3, %xmm3 +; SSE2-NEXT: psrlq $4, %xmm5 +; SSE2-NEXT: movsd {{.*#+}} xmm5 = 
xmm3[0],xmm5[1] +; SSE2-NEXT: xorpd %xmm1, %xmm5 +; SSE2-NEXT: psubq %xmm1, %xmm5 +; SSE2-NEXT: movdqa %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm5, %xmm3 ; SSE2-NEXT: retq ; ; SSE41-LABEL: combine_vec_sdiv_by_pow2b_v8i64: ; SSE41: # %bb.0: -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: movdqa %xmm1, %xmm3 +; SSE41-NEXT: movdqa %xmm2, %xmm5 +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: movdqa %xmm0, %xmm1 +; SSE41-NEXT: psrad $31, %xmm0 +; SSE41-NEXT: psrlq $62, %xmm0 +; SSE41-NEXT: paddq %xmm1, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psrad $2, %xmm2 +; SSE41-NEXT: psrlq $2, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] +; SSE41-NEXT: movdqa %xmm5, %xmm2 +; SSE41-NEXT: psrad $31, %xmm2 +; SSE41-NEXT: psrlq $62, %xmm2 +; SSE41-NEXT: paddq %xmm5, %xmm2 +; SSE41-NEXT: movdqa %xmm2, %xmm1 +; SSE41-NEXT: psrad $2, %xmm1 +; SSE41-NEXT: psrlq $2, %xmm2 +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7] +; SSE41-NEXT: movdqa %xmm4, %xmm1 ; SSE41-NEXT: psrad $31, %xmm1 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] ; SSE41-NEXT: movdqa %xmm1, %xmm5 ; SSE41-NEXT: psrlq $60, %xmm5 ; SSE41-NEXT: psrlq $61, %xmm1 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4,5,6,7] -; SSE41-NEXT: paddq %xmm3, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: psrlq $4, %xmm3 +; SSE41-NEXT: paddq %xmm4, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: psrlq $4, %xmm4 ; SSE41-NEXT: psrlq $3, %xmm1 -; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7] ; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [1152921504606846976,576460752303423488] ; SSE41-NEXT: pxor %xmm5, %xmm1 ; SSE41-NEXT: psubq %xmm5, %xmm1 -; SSE41-NEXT: movdqa %xmm4, %xmm3 -; SSE41-NEXT: psrad $31, %xmm3 -; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3] -; SSE41-NEXT: movdqa %xmm3, %xmm6 -; SSE41-NEXT: psrlq $60, %xmm6 -; SSE41-NEXT: psrlq $61, %xmm3 -; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4,5,6,7] -; SSE41-NEXT: paddq %xmm4, %xmm3 ; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psrlq $4, %xmm4 -; SSE41-NEXT: psrlq $3, %xmm3 -; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7] -; SSE41-NEXT: pxor %xmm5, %xmm3 -; SSE41-NEXT: psubq %xmm5, %xmm3 -; SSE41-NEXT: movdqa %xmm0, %xmm4 ; SSE41-NEXT: psrad $31, %xmm4 -; SSE41-NEXT: psrlq $62, %xmm4 -; SSE41-NEXT: paddq %xmm0, %xmm4 -; SSE41-NEXT: psrlq $2, %xmm4 -; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [9223372036854775808,2305843009213693952] -; SSE41-NEXT: pxor %xmm6, %xmm4 -; SSE41-NEXT: psubq %xmm6, %xmm4 -; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm0[0,1,2,3],xmm4[4,5,6,7] -; SSE41-NEXT: movdqa %xmm2, %xmm5 -; SSE41-NEXT: psrad $31, %xmm5 -; SSE41-NEXT: psrlq $62, %xmm5 -; SSE41-NEXT: paddq %xmm2, %xmm5 -; SSE41-NEXT: psrlq $2, %xmm5 -; SSE41-NEXT: pxor %xmm6, %xmm5 -; SSE41-NEXT: psubq %xmm6, %xmm5 -; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2,3],xmm5[4,5,6,7] -; SSE41-NEXT: movdqa %xmm4, %xmm0 -; SSE41-NEXT: movdqa %xmm5, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3] +; SSE41-NEXT: movdqa %xmm4, %xmm6 +; SSE41-NEXT: psrlq $60, %xmm6 +; SSE41-NEXT: psrlq $61, %xmm4 +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7] +; SSE41-NEXT: paddq %xmm3, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm3 +; SSE41-NEXT: psrlq $4, %xmm3 +; SSE41-NEXT: psrlq $3, 
%xmm4 +; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7] +; SSE41-NEXT: pxor %xmm5, %xmm4 +; SSE41-NEXT: psubq %xmm5, %xmm4 +; SSE41-NEXT: movdqa %xmm4, %xmm3 ; SSE41-NEXT: retq ; ; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v8i64: @@ -1875,17 +1873,16 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) { ; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm5 ; AVX1-NEXT: vpsrlq $62, %xmm5, %xmm5 ; AVX1-NEXT: vpaddq %xmm5, %xmm0, %xmm5 +; AVX1-NEXT: vpsrad $2, %xmm5, %xmm6 ; AVX1-NEXT: vpsrlq $2, %xmm5, %xmm5 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372036854775808,2305843009213693952] -; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vpsubq %xmm6, %xmm5, %xmm5 +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7] ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7] ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3 ; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm5 -; AVX1-NEXT: vpsrlq $60, %xmm5, %xmm7 +; AVX1-NEXT: vpsrlq $60, %xmm5, %xmm6 ; AVX1-NEXT: vpsrlq $61, %xmm5, %xmm5 -; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7] ; AVX1-NEXT: vpaddq %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpsrlq $4, %xmm3, %xmm5 ; AVX1-NEXT: vpsrlq $3, %xmm3, %xmm3 @@ -1895,9 +1892,9 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) { ; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlq $62, %xmm2, %xmm2 ; AVX1-NEXT: vpaddq %xmm2, %xmm1, %xmm2 +; AVX1-NEXT: vpsrad $2, %xmm2, %xmm4 ; AVX1-NEXT: vpsrlq $2, %xmm2, %xmm2 -; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2 -; AVX1-NEXT: vpsubq %xmm6, %xmm2, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7] ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7] ; AVX1-NEXT: retq @@ -1906,12 +1903,12 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) { ; AVX2: # %bb.0: ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [64,62,61,60] +; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = ; AVX2-NEXT: vpsrlvq %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpaddq %ymm3, %ymm0, %ymm3 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [0,2,3,4] +; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = ; AVX2-NEXT: vpsrlvq %ymm5, %ymm3, %ymm3 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [9223372036854775808,2305843009213693952,1152921504606846976,576460752303423488] +; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = ; AVX2-NEXT: vpxor %ymm6, %ymm3, %ymm3 ; AVX2-NEXT: vpsubq %ymm6, %ymm3, %ymm3 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7] @@ -1953,28 +1950,27 @@ define <8 x i64> @combine_vec_sdiv_by_pow2b_v8i64(<8 x i64> %x) { ; XOP-NEXT: vextractf128 $1, %ymm0, %xmm2 ; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [18446744073709551553,18446744073709551553] ; XOP-NEXT: vpshaq %xmm3, %xmm2, %xmm4 -; XOP-NEXT: vmovdqa {{.*#+}} xmm8 = [18446744073709551555,18446744073709551556] -; XOP-NEXT: vpshlq %xmm8, %xmm4, %xmm4 +; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [18446744073709551555,18446744073709551556] +; XOP-NEXT: vpshlq %xmm5, %xmm4, %xmm4 ; XOP-NEXT: vpaddq %xmm4, %xmm2, %xmm2 ; XOP-NEXT: vmovdqa {{.*#+}} xmm4 = [18446744073709551613,18446744073709551612] ; XOP-NEXT: vpshaq %xmm4, %xmm2, %xmm2 ; XOP-NEXT: vpshaq %xmm3, %xmm0, %xmm6 -; XOP-NEXT: vmovdqa {{.*#+}} xmm7 = [18446744073709551552,18446744073709551554] -; XOP-NEXT: vpshlq %xmm7, %xmm6, %xmm6 +; XOP-NEXT: vpsrlq $62, %xmm6, %xmm6 ; 
XOP-NEXT: vpaddq %xmm6, %xmm0, %xmm6 -; XOP-NEXT: vmovdqa {{.*#+}} xmm5 = [0,0,0,0,0,0,0,0,254,255,255,255,255,255,255,255] -; XOP-NEXT: vpshaq %xmm5, %xmm6, %xmm6 +; XOP-NEXT: vmovdqa {{.*#+}} xmm7 = +; XOP-NEXT: vpshaq %xmm7, %xmm6, %xmm6 ; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm6, %ymm2 ; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7] ; XOP-NEXT: vextractf128 $1, %ymm1, %xmm2 ; XOP-NEXT: vpshaq %xmm3, %xmm2, %xmm6 -; XOP-NEXT: vpshlq %xmm8, %xmm6, %xmm6 -; XOP-NEXT: vpaddq %xmm6, %xmm2, %xmm2 +; XOP-NEXT: vpshlq %xmm5, %xmm6, %xmm5 +; XOP-NEXT: vpaddq %xmm5, %xmm2, %xmm2 ; XOP-NEXT: vpshaq %xmm4, %xmm2, %xmm2 ; XOP-NEXT: vpshaq %xmm3, %xmm1, %xmm3 -; XOP-NEXT: vpshlq %xmm7, %xmm3, %xmm3 +; XOP-NEXT: vpsrlq $62, %xmm3, %xmm3 ; XOP-NEXT: vpaddq %xmm3, %xmm1, %xmm3 -; XOP-NEXT: vpshaq %xmm5, %xmm3, %xmm3 +; XOP-NEXT: vpshaq %xmm7, %xmm3, %xmm3 ; XOP-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 ; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7] ; XOP-NEXT: retq @@ -2192,7 +2188,7 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE2-NEXT: psrlw $8, %xmm1 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: psraw $8, %xmm2 -; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm2 +; SSE2-NEXT: psllw $7, %xmm2 ; SSE2-NEXT: psrlw $8, %xmm2 ; SSE2-NEXT: packuswb %xmm1, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255] @@ -2209,31 +2205,32 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: pxor %xmm2, %xmm2 -; SSE41-NEXT: pxor %xmm3, %xmm3 -; SSE41-NEXT: pcmpgtb %xmm0, %xmm3 -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero -; SSE41-NEXT: movdqa %xmm4, %xmm0 -; SSE41-NEXT: psllw $1, %xmm0 -; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4,5],xmm0[6],xmm4[7] -; SSE41-NEXT: psrlw $8, %xmm0 -; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] -; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm3 -; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: packuswb %xmm3, %xmm0 -; SSE41-NEXT: paddb %xmm1, %xmm0 -; SSE41-NEXT: movdqa %xmm0, %xmm3 -; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE41-NEXT: psraw $8, %xmm3 -; SSE41-NEXT: movdqa %xmm3, %xmm4 -; SSE41-NEXT: psllw $7, %xmm4 -; SSE41-NEXT: psllw $8, %xmm3 -; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5],xmm4[6],xmm3[7] +; SSE41-NEXT: pxor %xmm0, %xmm0 +; SSE41-NEXT: pcmpgtb %xmm1, %xmm0 +; SSE41-NEXT: pxor %xmm4, %xmm4 +; SSE41-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; SSE41-NEXT: psllw $1, %xmm3 +; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1],xmm3[2],xmm4[3,4,5],xmm3[6],xmm4[7] ; SSE41-NEXT: psrlw $8, %xmm3 -; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE41-NEXT: psraw $8, %xmm0 +; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = 
xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm0 ; SSE41-NEXT: psrlw $8, %xmm0 ; SSE41-NEXT: packuswb %xmm0, %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm3 +; SSE41-NEXT: movdqa %xmm3, %xmm0 +; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15] +; SSE41-NEXT: psraw $8, %xmm0 +; SSE41-NEXT: movdqa %xmm0, %xmm4 +; SSE41-NEXT: psllw $1, %xmm4 +; SSE41-NEXT: psllw $7, %xmm0 +; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm4[5],xmm0[6],xmm4[7] +; SSE41-NEXT: psrlw $8, %xmm0 +; SSE41-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE41-NEXT: psraw $8, %xmm3 +; SSE41-NEXT: psllw $7, %xmm3 +; SSE41-NEXT: psrlw $8, %xmm3 +; SSE41-NEXT: packuswb %xmm0, %xmm3 ; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255] ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: psubb %xmm1, %xmm2 @@ -2246,8 +2243,9 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; AVX1: # %bb.0: ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm1, %xmm2 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; AVX1-NEXT: vpsllw $1, %xmm3, %xmm4 +; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX1-NEXT: vpsllw $1, %xmm4, %xmm4 ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5],xmm4[6],xmm3[7] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] @@ -2255,17 +2253,17 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) { ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm2 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; AVX1-NEXT: vpsraw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpsllw $7, %xmm3, %xmm4 -; AVX1-NEXT: vpsllw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5],xmm4[6],xmm3[7] +; AVX1-NEXT: vpsllw $1, %xmm3, %xmm4 +; AVX1-NEXT: vpsllw $7, %xmm3, %xmm3 +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5],xmm3[6],xmm4[7] ; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255] ; AVX1-NEXT: vpblendvb %xmm3, %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpsubb %xmm0, %xmm1, %xmm1 @@ -2393,9 +2391,9 @@ define <4 x i32> 
@non_splat_minus_one_divisor_2(<4 x i32> %A) { ; ; AVX2ORLATER-LABEL: non_splat_minus_one_divisor_2: ; AVX2ORLATER: # %bb.0: -; AVX2ORLATER-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm1 +; AVX2ORLATER-NEXT: vpsrld $31, %xmm0, %xmm1 ; AVX2ORLATER-NEXT: vpaddd %xmm1, %xmm0, %xmm1 -; AVX2ORLATER-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1 +; AVX2ORLATER-NEXT: vpsrad $1, %xmm1, %xmm1 ; AVX2ORLATER-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] ; AVX2ORLATER-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2ORLATER-NEXT: vpsubd %xmm0, %xmm1, %xmm1 @@ -2404,9 +2402,9 @@ define <4 x i32> @non_splat_minus_one_divisor_2(<4 x i32> %A) { ; ; XOP-LABEL: non_splat_minus_one_divisor_2: ; XOP: # %bb.0: -; XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1 +; XOP-NEXT: vpsrld $31, %xmm0, %xmm1 ; XOP-NEXT: vpaddd %xmm1, %xmm0, %xmm1 -; XOP-NEXT: vpshad {{.*}}(%rip), %xmm1, %xmm1 +; XOP-NEXT: vpsrad $1, %xmm1, %xmm1 ; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] ; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; XOP-NEXT: vpsubd %xmm0, %xmm1, %xmm1 diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll index bb7583b..e7e14f8 100644 --- a/llvm/test/CodeGen/X86/combine-udiv.ll +++ b/llvm/test/CodeGen/X86/combine-udiv.ll @@ -644,20 +644,21 @@ define <8 x i16> @combine_vec_udiv_nonuniform3(<8 x i16> %x) { define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; SSE2-LABEL: combine_vec_udiv_nonuniform4: ; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm1, %xmm1 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSE2-NEXT: movl $171, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: pmullw %xmm2, %xmm1 -; SSE2-NEXT: psrlw $8, %xmm1 -; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm1 -; SSE2-NEXT: psrlw $8, %xmm1 -; SSE2-NEXT: movl $255, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: movd %eax, %xmm3 +; SSE2-NEXT: pmullw %xmm0, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: packuswb %xmm0, %xmm3 +; SSE2-NEXT: psrlw $7, %xmm3 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm3 +; SSE2-NEXT: pandn %xmm3, %xmm2 +; SSE2-NEXT: por %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: combine_vec_udiv_nonuniform4: @@ -668,12 +669,9 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero ; SSE41-NEXT: pmullw %xmm0, %xmm2 ; SSE41-NEXT: psrlw $8, %xmm2 -; SSE41-NEXT: movdqa %xmm2, %xmm0 -; SSE41-NEXT: psllw $1, %xmm0 -; SSE41-NEXT: psllw $8, %xmm2 -; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3,4,5,6,7] -; SSE41-NEXT: psrlw $8, %xmm2 ; SSE41-NEXT: packuswb %xmm0, %xmm2 +; SSE41-NEXT: psrlw $7, %xmm2 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 ; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 @@ -686,11 
+684,9 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; AVX-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero ; AVX-NEXT: vpmullw %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1 -; AVX-NEXT: vpsllw $1, %xmm1, %xmm2 -; AVX-NEXT: vpsllw $8, %xmm1, %xmm1 -; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5,6,7] -; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1 ; AVX-NEXT: vpackuswb %xmm0, %xmm1, %xmm1 +; AVX-NEXT: vpsrlw $7, %xmm1, %xmm1 +; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; AVX-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq @@ -722,7 +718,7 @@ define <8 x i16> @pr38477(<8 x i16> %a0) { ; SSE2-NEXT: psubw %xmm2, %xmm1 ; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1 ; SSE2-NEXT: paddw %xmm2, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,0,65535] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,65535,65535,0,65535] ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: pandn %xmm1, %xmm3 ; SSE2-NEXT: pmulhuw {{.*}}(%rip), %xmm1 @@ -745,7 +741,7 @@ define <8 x i16> @pr38477(<8 x i16> %a0) { ; SSE41-NEXT: paddw %xmm2, %xmm1 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = ; SSE41-NEXT: pmulhuw %xmm1, %xmm2 -; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5],xmm1[6],xmm2[7] +; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5],xmm1[6],xmm2[7] ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -757,7 +753,7 @@ define <8 x i16> @pr38477(<8 x i16> %a0) { ; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm2, %xmm2 ; AVX-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpmulhuw {{.*}}(%rip), %xmm1, %xmm2 -; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5],xmm1[6],xmm2[7] +; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5],xmm1[6],xmm2[7] ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7] ; AVX-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll index 43cf147..a6b993d 100644 --- a/llvm/test/CodeGen/X86/known-signbits-vector.ll +++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll @@ -104,10 +104,8 @@ define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind { ; X32-LABEL: signbits_ashr_shl_extract_sitofp: ; X32: # %bb.0: ; X32-NEXT: pushl %eax -; X32-NEXT: vpsrlq $61, %xmm0, %xmm0 -; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [4,0,8,0] -; X32-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; X32-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; X32-NEXT: vpsrad $29, %xmm0, %xmm0 +; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; X32-NEXT: vpsllq $20, %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: vmovss %xmm0, (%esp) @@ -220,10 +218,8 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 ; X32-LABEL: signbits_ashr_sext_sextinreg_and_extract_sitofp: ; X32: # %bb.0: ; X32-NEXT: pushl %eax -; X32-NEXT: vpsrlq $61, %xmm0, %xmm0 -; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [4,0,8,0] -; X32-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; X32-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; X32-NEXT: vpsrad $29, %xmm0, %xmm0 +; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; X32-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X32-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 @@ -234,10 +230,8 @@ define float @signbits_ashr_sext_sextinreg_and_extract_sitofp(<2 x i64> %a0, <2 ; ; X64-LABEL: 
signbits_ashr_sext_sextinreg_and_extract_sitofp: ; X64: # %bb.0: -; X64-NEXT: vpsrlq $61, %xmm0, %xmm0 -; X64-NEXT: vmovdqa {{.*#+}} xmm1 = [4,8] -; X64-NEXT: vpxor %xmm1, %xmm0, %xmm0 -; X64-NEXT: vpsubq %xmm1, %xmm0, %xmm0 +; X64-NEXT: vpsrad $29, %xmm0, %xmm0 +; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; X64-NEXT: vmovd %edi, %xmm1 ; X64-NEXT: vpand %xmm1, %xmm0, %xmm0 ; X64-NEXT: vmovq %xmm0, %rax @@ -309,14 +303,15 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x ; X32-NEXT: subl $16, %esp ; X32-NEXT: vpmovsxdq 8(%ebp), %xmm3 ; X32-NEXT: vpmovsxdq 16(%ebp), %xmm4 -; X32-NEXT: vpsrlq $33, %xmm2, %xmm5 -; X32-NEXT: vmovdqa {{.*#+}} xmm6 = [1073741824,0,1,0] -; X32-NEXT: vpxor %xmm6, %xmm5, %xmm5 -; X32-NEXT: vpsubq %xmm6, %xmm5, %xmm5 +; X32-NEXT: vpsrad $31, %xmm2, %xmm5 +; X32-NEXT: vpsrad $1, %xmm2, %xmm6 +; X32-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,3,3] +; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5],xmm5[6,7] ; X32-NEXT: vextractf128 $1, %ymm2, %xmm2 -; X32-NEXT: vpsrlq $33, %xmm2, %xmm2 -; X32-NEXT: vpxor %xmm6, %xmm2, %xmm2 -; X32-NEXT: vpsubq %xmm6, %xmm2, %xmm2 +; X32-NEXT: vpsrad $31, %xmm2, %xmm6 +; X32-NEXT: vpsrad $1, %xmm2, %xmm2 +; X32-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2,3],xmm2[4,5],xmm6[6,7] ; X32-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm6 ; X32-NEXT: vblendvpd %xmm6, %xmm5, %xmm3, %xmm3 ; X32-NEXT: vextractf128 $1, %ymm1, %xmm1 @@ -335,14 +330,15 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x ; ; X64-LABEL: signbits_ashr_sext_select_shuffle_sitofp: ; X64: # %bb.0: -; X64-NEXT: vpsrlq $33, %xmm2, %xmm4 -; X64-NEXT: vmovdqa {{.*#+}} xmm5 = [1073741824,1] -; X64-NEXT: vpxor %xmm5, %xmm4, %xmm4 -; X64-NEXT: vpsubq %xmm5, %xmm4, %xmm4 +; X64-NEXT: vpsrad $31, %xmm2, %xmm4 +; X64-NEXT: vpsrad $1, %xmm2, %xmm5 +; X64-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,3,3] +; X64-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3],xmm5[4,5],xmm4[6,7] ; X64-NEXT: vextractf128 $1, %ymm2, %xmm2 -; X64-NEXT: vpsrlq $33, %xmm2, %xmm2 -; X64-NEXT: vpxor %xmm5, %xmm2, %xmm2 -; X64-NEXT: vpsubq %xmm5, %xmm2, %xmm2 +; X64-NEXT: vpsrad $31, %xmm2, %xmm5 +; X64-NEXT: vpsrad $1, %xmm2, %xmm2 +; X64-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3] +; X64-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3],xmm2[4,5],xmm5[6,7] ; X64-NEXT: vpmovsxdq %xmm3, %xmm5 ; X64-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] ; X64-NEXT: vpmovsxdq %xmm3, %xmm3 diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll index 2c82699..523a8f1 100644 --- a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll +++ b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll @@ -390,10 +390,11 @@ define <4 x i32> @test_urem_one(<4 x i32> %X) nounwind readnone { ; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1 ; CHECK-SSE2-NEXT: psrld $2, %xmm1 +; CHECK-SSE2-NEXT: psrld $3, %xmm2 +; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[3,0] ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm3 ; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm1[0,0] -; CHECK-SSE2-NEXT: psrld $3, %xmm2 -; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[2,3] +; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[0,2] ; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [6,1,12,14] ; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm3 ; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] diff --git 
a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
index e7e1876..b40c881 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -2402,7 +2402,7 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
 define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ; SSE2-LABEL: constant_funnnel_v8i16:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = <u,2,4,8,16,32,64,128>
 ; SSE2-NEXT: pmulhuw %xmm2, %xmm1
 ; SSE2-NEXT: pmullw %xmm0, %xmm2
 ; SSE2-NEXT: por %xmm1, %xmm2
@@ -2415,7 +2415,7 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ;
 ; SSE41-LABEL: constant_funnnel_v8i16:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <u,2,4,8,16,32,64,128>
 ; SSE41-NEXT: pmulhuw %xmm2, %xmm1
 ; SSE41-NEXT: pmullw %xmm0, %xmm2
 ; SSE41-NEXT: por %xmm1, %xmm2
@@ -2424,7 +2424,7 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ;
 ; AVX-LABEL: constant_funnnel_v8i16:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <u,2,4,8,16,32,64,128>
 ; AVX-NEXT: vpmulhuw %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpmullw %xmm2, %xmm0, %xmm2
 ; AVX-NEXT: vpor %xmm1, %xmm2, %xmm1
@@ -2433,7 +2433,7 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ;
 ; AVX512F-LABEL: constant_funnnel_v8i16:
 ; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <u,2,4,8,16,32,64,128>
 ; AVX512F-NEXT: vpmulhuw %xmm2, %xmm1, %xmm1
 ; AVX512F-NEXT: vpmullw %xmm2, %xmm0, %xmm2
 ; AVX512F-NEXT: vpor %xmm1, %xmm2, %xmm1
@@ -2442,7 +2442,7 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ;
 ; AVX512VL-LABEL: constant_funnnel_v8i16:
 ; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <u,2,4,8,16,32,64,128>
 ; AVX512VL-NEXT: vpmulhuw %xmm2, %xmm1, %xmm1
 ; AVX512VL-NEXT: vpmullw %xmm2, %xmm0, %xmm2
 ; AVX512VL-NEXT: vpor %xmm1, %xmm2, %xmm1
@@ -2453,9 +2453,9 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <u,15,14,13,12,11,10,9>
 ; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <u,1,2,3,4,5,6,7>
 ; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
 ; AVX512BW-NEXT: vpor %xmm1, %xmm2, %xmm1
 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
@@ -2466,9 +2466,9 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ; AVX512VBMI2: # %bb.0:
 ; AVX512VBMI2-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
 ; AVX512VBMI2-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
+; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm2 = <u,15,14,13,12,11,10,9>
 ; AVX512VBMI2-NEXT: vpsrlvw %zmm2, %zmm1, %zmm1
-; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm2 = <u,1,2,3,4,5,6,7>
 ; AVX512VBMI2-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
 ; AVX512VBMI2-NEXT: vpor %xmm1, %xmm2, %xmm1
 ; AVX512VBMI2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
@@ -2498,7 +2498,7 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ;
 ; X32-SSE-LABEL: constant_funnnel_v8i16:
 ; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = <u,2,4,8,16,32,64,128>
 ; X32-SSE-NEXT: pmulhuw %xmm2, %xmm1
 ; X32-SSE-NEXT: pmullw %xmm0, %xmm2
 ; X32-SSE-NEXT: por %xmm1, %xmm2
@@ -2518,11 +2518,11 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; SSE2-NEXT: pxor %xmm2, %xmm2
 ; SSE2-NEXT: movdqa %xmm1, %xmm3
 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = <u,128,64,32,16,8,4,2>
 ; SSE2-NEXT: pmullw %xmm4, %xmm3
 ; SSE2-NEXT: psrlw $8, %xmm3
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; SSE2-NEXT: movdqa {{.*#+}} xmm5 = <u,2,4,8,16,32,64,128>
 ; SSE2-NEXT: pmullw %xmm5, %xmm1
 ; SSE2-NEXT: psrlw $8, %xmm1
 ; SSE2-NEXT: packuswb %xmm3, %xmm1
@@ -2550,10 +2550,10 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; SSE41-NEXT: pxor %xmm0, %xmm0
 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
 ; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [1,128,64,32,16,8,4,2]
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = <u,128,64,32,16,8,4,2>
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
 ; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <u,2,4,8,16,32,64,128>
 ; SSE41-NEXT: pmullw %xmm4, %xmm3
 ; SSE41-NEXT: psrlw $8, %xmm3
 ; SSE41-NEXT: packuswb %xmm1, %xmm3
@@ -2576,11 +2576,11 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,128,64,32,16,8,4,2]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <u,128,64,32,16,8,4,2>
 ; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <u,2,4,8,16,32,64,128>
 ; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
 ; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
@@ -2600,13 +2600,12 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; AVX2-LABEL: constant_funnnel_v16i8:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,1,128,64,32,16,8,4,2]
-; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm1, %ymm1
 ; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX2-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm2, %ymm2
 ; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
 ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
 ; AVX2-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
@@ -2644,10 +2643,10 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ;
 ; AVX512BW-LABEL: constant_funnnel_v16i8:
 ; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = <u,7,6,5,4,3,2,1,u,1,2,3,4,5,6,7>
 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = <u,1,2,3,4,5,6,7,u,7,6,5,4,3,2,1>
 ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512BW-NEXT: vpsllvw %zmm2, %zmm3, %zmm2
 ; AVX512BW-NEXT: vpor %ymm1, %ymm2, %ymm1
@@ -2659,10 +2658,10 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ;
 ; AVX512VBMI2-LABEL: constant_funnnel_v16i8:
 ; AVX512VBMI2: # %bb.0:
-; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7]
+; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,7,6,5,4,3,2,1,u,1,2,3,4,5,6,7>
 ; AVX512VBMI2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512VBMI2-NEXT: vpsrlvw %zmm2, %zmm1, %zmm1
-; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1]
+; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,1,2,3,4,5,6,7,u,7,6,5,4,3,2,1>
 ; AVX512VBMI2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512VBMI2-NEXT: vpsllvw %zmm2, %zmm3, %zmm2
 ; AVX512VBMI2-NEXT: vpor %ymm1, %ymm2, %ymm1
@@ -2716,11 +2715,11 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
 ; X32-SSE-NEXT: pxor %xmm2, %xmm2
 ; X32-SSE-NEXT: movdqa %xmm1, %xmm3
 ; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = <u,128,64,32,16,8,4,2>
 ; X32-SSE-NEXT: pmullw %xmm4, %xmm3
 ; X32-SSE-NEXT: psrlw $8, %xmm3
 ; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm5 = <u,2,4,8,16,32,64,128>
 ; X32-SSE-NEXT: pmullw %xmm5, %xmm1
 ; X32-SSE-NEXT: psrlw $8, %xmm1
 ; X32-SSE-NEXT: packuswb %xmm3, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 3fa183e..1e30cb2 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -1887,7 +1887,7 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [256,512,1024,2048,4096,8192,16384,32768]
 ; AVX1-NEXT: vpmulhuw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <u,2,4,8,16,32,64,128>
 ; AVX1-NEXT: vpmulhuw %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
@@ -1903,7 +1903,7 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin
 ;
 ; AVX2-LABEL: constant_funnnel_v16i16:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768>
 ; AVX2-NEXT: vpmulhuw %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm2
 ; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
@@ -1913,7 +1913,7 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin
 ;
 ; AVX512F-LABEL: constant_funnnel_v16i16:
 ; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = <u,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768>
 ; AVX512F-NEXT: vpmulhuw %ymm2, %ymm1, %ymm1
 ; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm2
 ; AVX512F-NEXT: vpor %ymm1, %ymm2, %ymm1
@@ -1923,7 +1923,7 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin
 ;
 ; AVX512VL-LABEL: constant_funnnel_v16i16:
 ; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768]
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = <u,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768>
 ; AVX512VL-NEXT: vpmulhuw %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT: vpmullw %ymm2, %ymm0, %ymm2
 ; AVX512VL-NEXT: vpor %ymm1, %ymm2, %ymm1
@@ -1935,9 +1935,9 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = <u,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1>
 ; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm1, %zmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = <u,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>
 ; AVX512BW-NEXT: vpsllvw %zmm2, %zmm0, %zmm2
 ; AVX512BW-NEXT: vpor %ymm1, %ymm2, %ymm1
 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
@@ -1948,9 +1948,9 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin
 ; AVX512VBMI2: # %bb.0:
 ; AVX512VBMI2-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
 ; AVX512VBMI2-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
+; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = <u,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1>
 ; AVX512VBMI2-NEXT: vpsrlvw %zmm2,
%zmm1, %zmm1 -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX512VBMI2-NEXT: vpsllvw %zmm2, %zmm0, %zmm2 ; AVX512VBMI2-NEXT: vpor %ymm1, %ymm2, %ymm1 ; AVX512VBMI2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15] @@ -1988,7 +1988,7 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin ; ; XOPAVX2-LABEL: constant_funnnel_v16i16: ; XOPAVX2: # %bb.0: -; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768] +; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm2 = ; XOPAVX2-NEXT: vpmulhuw %ymm2, %ymm1, %ymm1 ; XOPAVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm2 ; XOPAVX2-NEXT: vpor %ymm1, %ymm2, %ymm1 @@ -2005,11 +2005,11 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,128,64,32,16,8,4,2] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [256,128,64,32,16,8,4,2] ; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero -; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,2,4,8,16,32,64,128] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [256,2,4,8,16,32,64,128] ; AVX1-NEXT: vpmullw %xmm6, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2 @@ -2023,19 +2023,21 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = +; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = ; AVX1-NEXT: vpmullw %xmm6, %xmm2, %xmm2 -; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2 ; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm5 -; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpmullw %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vpand %xmm5, %xmm4, %xmm4 ; AVX1-NEXT: vpackuswb %xmm3, %xmm4, 
%xmm3 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 ; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1 @@ -2182,12 +2184,12 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; XOPAVX1-LABEL: constant_funnnel_v32i8: ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [248,249,250,251,252,253,254,255,248,255,254,253,252,251,250,249] +; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm2, %xmm2 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm1 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1] +; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm2, %xmm2 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm3 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 @@ -2199,12 +2201,12 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; XOPAVX2-LABEL: constant_funnnel_v32i8: ; XOPAVX2: # %bb.0: ; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [248,249,250,251,252,253,254,255,248,255,254,253,252,251,250,249] +; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm2, %xmm2 ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm1 ; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 ; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2 -; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1] +; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm2, %xmm2 ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm0, %xmm3 ; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 diff --git a/llvm/test/CodeGen/X86/vector-fshl-512.ll b/llvm/test/CodeGen/X86/vector-fshl-512.ll index 7e01a59..0e07ddb 100644 --- a/llvm/test/CodeGen/X86/vector-fshl-512.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-512.ll @@ -1044,7 +1044,7 @@ define <16 x i32> @constant_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y) nounwin define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwind { ; AVX512F-LABEL: constant_funnnel_v32i16: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768] +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = ; AVX512F-NEXT: vpmulhuw %ymm4, %ymm2, %ymm2 ; AVX512F-NEXT: vpmullw %ymm4, %ymm0, %ymm5 ; AVX512F-NEXT: vpor %ymm2, %ymm5, %ymm2 @@ -1059,7 +1059,7 @@ define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwin ; ; AVX512VL-LABEL: constant_funnnel_v32i16: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,256,512,1024,2048,4096,8192,16384,32768] +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = ; AVX512VL-NEXT: vpmulhuw %ymm4, %ymm2, %ymm2 ; AVX512VL-NEXT: vpmullw %ymm4, %ymm0, %ymm5 ; AVX512VL-NEXT: vpor %ymm2, %ymm5, %ymm2 @@ -1126,12 +1126,12 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; AVX512F-NEXT: vpblendvb %ymm10, %ymm7, %ymm4, %ymm4 ; AVX512F-NEXT: vpxor %xmm7, %xmm7, %xmm7 ; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm11 = ymm2[8],ymm7[8],ymm2[9],ymm7[9],ymm2[10],ymm7[10],ymm2[11],ymm7[11],ymm2[12],ymm7[12],ymm2[13],ymm7[13],ymm2[14],ymm7[14],ymm2[15],ymm7[15],ymm2[24],ymm7[24],ymm2[25],ymm7[25],ymm2[26],ymm7[26],ymm2[27],ymm7[27],ymm2[28],ymm7[28],ymm2[29],ymm7[29],ymm2[30],ymm7[30],ymm2[31],ymm7[31] -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm12 = 
[256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2] ; AVX512F-NEXT: # ymm12 = mem[0,1,0,1] ; AVX512F-NEXT: vpmullw %ymm12, %ymm11, %ymm11 ; AVX512F-NEXT: vpsrlw $8, %ymm11, %ymm11 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm7[0],ymm2[1],ymm7[1],ymm2[2],ymm7[2],ymm2[3],ymm7[3],ymm2[4],ymm7[4],ymm2[5],ymm7[5],ymm2[6],ymm7[6],ymm2[7],ymm7[7],ymm2[16],ymm7[16],ymm2[17],ymm7[17],ymm2[18],ymm7[18],ymm2[19],ymm7[19],ymm2[20],ymm7[20],ymm2[21],ymm7[21],ymm2[22],ymm7[22],ymm2[23],ymm7[23] -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm13 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128] ; AVX512F-NEXT: # ymm13 = mem[0,1,0,1] ; AVX512F-NEXT: vpmullw %ymm13, %ymm2, %ymm2 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2 @@ -1176,13 +1176,13 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; AVX512VL-NEXT: vpblendvb %ymm10, %ymm7, %ymm4, %ymm4 ; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm7 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] ; AVX512VL-NEXT: vpsrlw $8, %ymm7, %ymm7 -; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2] +; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2] ; AVX512VL-NEXT: # ymm11 = mem[0,1,0,1] ; AVX512VL-NEXT: vpmullw %ymm11, %ymm7, %ymm7 ; AVX512VL-NEXT: vpsrlw $8, %ymm7, %ymm7 ; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] ; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 -; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128] +; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm12 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128] ; AVX512VL-NEXT: # ymm12 = mem[0,1,0,1] ; AVX512VL-NEXT: vpmullw %ymm12, %ymm2, %ymm2 ; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2 diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll index 7c31f9c..29035a5 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll @@ -2429,17 +2429,15 @@ define <4 x i32> @constant_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind { define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; SSE2-LABEL: constant_funnnel_v8i16: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pandn %xmm1, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = -; SSE2-NEXT: pmulhuw %xmm4, %xmm1 -; SSE2-NEXT: pand %xmm2, %xmm1 -; SSE2-NEXT: pmullw %xmm4, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = +; SSE2-NEXT: movdqa %xmm1, %xmm3 +; SSE2-NEXT: pmulhuw %xmm2, %xmm3 +; SSE2-NEXT: pmullw %xmm2, %xmm0 ; SSE2-NEXT: por %xmm3, %xmm0 -; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] ; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: pandn %xmm1, %xmm2 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: constant_funnnel_v8i16: @@ -2483,9 +2481,9 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7] +; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = ; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm1, 
%zmm2 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = [16,15,14,13,12,11,10,9] +; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm3 = ; AVX512BW-NEXT: vpsllvw %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7] @@ -2496,9 +2494,9 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; AVX512VBMI2: # %bb.0: ; AVX512VBMI2-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1 ; AVX512VBMI2-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0 -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm2 = ; AVX512VBMI2-NEXT: vpsrlvw %zmm2, %zmm1, %zmm2 -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm3 = [16,15,14,13,12,11,10,9] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} xmm3 = ; AVX512VBMI2-NEXT: vpsllvw %zmm3, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX512VBMI2-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7] @@ -2529,17 +2527,15 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind { ; ; X32-SSE-LABEL: constant_funnnel_v8i16: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] -; X32-SSE-NEXT: movdqa %xmm2, %xmm3 -; X32-SSE-NEXT: pandn %xmm1, %xmm3 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = -; X32-SSE-NEXT: pmulhuw %xmm4, %xmm1 -; X32-SSE-NEXT: pand %xmm2, %xmm1 -; X32-SSE-NEXT: pmullw %xmm4, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = +; X32-SSE-NEXT: movdqa %xmm1, %xmm3 +; X32-SSE-NEXT: pmulhuw %xmm2, %xmm3 +; X32-SSE-NEXT: pmullw %xmm2, %xmm0 ; X32-SSE-NEXT: por %xmm3, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535] ; X32-SSE-NEXT: pand %xmm2, %xmm0 -; X32-SSE-NEXT: por %xmm3, %xmm0 +; X32-SSE-NEXT: pandn %xmm1, %xmm2 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: retl %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> ) ret <8 x i16> %res @@ -2551,23 +2547,25 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: movdqa %xmm1, %xmm3 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] -; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm3 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = +; SSE2-NEXT: pmullw %xmm4, %xmm3 ; SSE2-NEXT: psrlw $8, %xmm3 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] -; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm4 -; SSE2-NEXT: psrlw $8, %xmm4 -; SSE2-NEXT: packuswb %xmm3, %xmm4 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; SSE2-NEXT: pand %xmm3, %xmm2 +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = +; SSE2-NEXT: pmullw %xmm2, %xmm5 +; SSE2-NEXT: psrlw $8, %xmm5 +; SSE2-NEXT: packuswb %xmm3, %xmm5 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpckhbw 
{{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; SSE2-NEXT: pmullw %xmm4, %xmm3 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE2-NEXT: pand %xmm4, %xmm3 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm0 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: por %xmm4, %xmm0 +; SSE2-NEXT: pmullw %xmm2, %xmm0 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: packuswb %xmm3, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] ; SSE2-NEXT: pand %xmm2, %xmm0 ; SSE2-NEXT: pandn %xmm1, %xmm2 @@ -2576,47 +2574,51 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; ; SSE41-LABEL: constant_funnnel_v16i8: ; SSE41: # %bb.0: -; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm0 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; SSE41-NEXT: pand %xmm3, %xmm0 -; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm2 -; SSE41-NEXT: pand %xmm3, %xmm2 -; SSE41-NEXT: packuswb %xmm0, %xmm2 -; SSE41-NEXT: pxor %xmm0, %xmm0 +; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] -; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm3 +; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = +; SSE41-NEXT: pmullw %xmm2, %xmm3 ; SSE41-NEXT: psrlw $8, %xmm3 ; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm4 +; SSE41-NEXT: movdqa {{.*#+}} xmm5 = +; SSE41-NEXT: pmullw %xmm5, %xmm4 ; SSE41-NEXT: psrlw $8, %xmm4 ; SSE41-NEXT: packuswb %xmm3, %xmm4 -; SSE41-NEXT: por %xmm2, %xmm4 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE41-NEXT: pmullw %xmm2, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm2, %xmm0 +; SSE41-NEXT: pmullw %xmm5, %xmm3 +; SSE41-NEXT: pand %xmm2, %xmm3 +; SSE41-NEXT: packuswb %xmm0, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] -; SSE41-NEXT: pblendvb %xmm0, %xmm4, %xmm1 +; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq ; ; AVX1-LABEL: constant_funnnel_v16i8: ; AVX1: # %bb.0: -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm2, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = 
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0 -; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0 -; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 -; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm3, %xmm3 -; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 -; AVX1-NEXT: vpackuswb %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = +; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 +; AVX1-NEXT: vpackuswb %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpackuswb %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] ; AVX1-NEXT: vpblendvb %xmm2, %xmm0, %xmm1, %xmm0 @@ -2668,10 +2670,10 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; ; AVX512BW-LABEL: constant_funnnel_v16i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1] +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm3, %zmm2 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7] +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = ; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512BW-NEXT: vpsllvw %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpor %ymm2, %ymm0, %ymm0 @@ -2683,10 +2685,10 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; ; AVX512VBMI2-LABEL: constant_funnnel_v16i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,0,7,6,5,4,3,2,1] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX512VBMI2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero ; AVX512VBMI2-NEXT: vpsrlvw %zmm2, 
%zmm3, %zmm2 -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm3 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm3 = ; AVX512VBMI2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero ; AVX512VBMI2-NEXT: vpsllvw %zmm3, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: vpor %ymm2, %ymm0, %ymm0 @@ -2738,23 +2740,25 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind { ; X32-SSE-NEXT: pxor %xmm2, %xmm2 ; X32-SSE-NEXT: movdqa %xmm1, %xmm3 ; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] -; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm3 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = +; X32-SSE-NEXT: pmullw %xmm4, %xmm3 ; X32-SSE-NEXT: psrlw $8, %xmm3 -; X32-SSE-NEXT: movdqa %xmm1, %xmm4 -; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7] -; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: psrlw $8, %xmm4 -; X32-SSE-NEXT: packuswb %xmm3, %xmm4 -; X32-SSE-NEXT: movdqa %xmm0, %xmm2 -; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15] -; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm2 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255] -; X32-SSE-NEXT: pand %xmm3, %xmm2 +; X32-SSE-NEXT: movdqa %xmm1, %xmm5 +; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7] +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = +; X32-SSE-NEXT: pmullw %xmm2, %xmm5 +; X32-SSE-NEXT: psrlw $8, %xmm5 +; X32-SSE-NEXT: packuswb %xmm3, %xmm5 +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15] +; X32-SSE-NEXT: pmullw %xmm4, %xmm3 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; X32-SSE-NEXT: pand %xmm4, %xmm3 ; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X32-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: pand %xmm3, %xmm0 -; X32-SSE-NEXT: packuswb %xmm2, %xmm0 -; X32-SSE-NEXT: por %xmm4, %xmm0 +; X32-SSE-NEXT: pmullw %xmm2, %xmm0 +; X32-SSE-NEXT: pand %xmm4, %xmm0 +; X32-SSE-NEXT: packuswb %xmm3, %xmm0 +; X32-SSE-NEXT: por %xmm5, %xmm0 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255] ; X32-SSE-NEXT: pand %xmm2, %xmm0 ; X32-SSE-NEXT: pandn %xmm1, %xmm2 diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll index c3e58ae..0f67bbe 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll @@ -1887,17 +1887,16 @@ define <8 x i32> @constant_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y) nounwind { define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwind { ; AVX1-LABEL: constant_funnnel_v16i16: ; AVX1: # %bb.0: -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 
= [256,128,64,32,16,8,4,2] -; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpmulhuw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = +; AVX1-NEXT: vpmulhuw %xmm4, %xmm1, %xmm5 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5 +; AVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm3 ; AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX1-NEXT: vpmulhuw %xmm3, %xmm2, %xmm2 -; AVX1-NEXT: vpmulhuw %xmm4, %xmm1, %xmm3 -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3,4,5,6,7] -; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 ; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535] ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 @@ -1909,8 +1908,6 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX2-NEXT: vpmulhuw %ymm2, %ymm1, %ymm3 -; AVX2-NEXT: vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15] -; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] ; AVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm3, %ymm0, %ymm0 ; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15] @@ -1921,8 +1918,6 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX512F-NEXT: vpmulhuw %ymm2, %ymm1, %ymm3 -; AVX512F-NEXT: vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15] -; AVX512F-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] ; AVX512F-NEXT: vpmullw %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm3, %ymm0, %ymm0 ; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15] @@ -1933,8 +1928,6 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX512VL-NEXT: vpmulhuw %ymm2, %ymm1, %ymm3 -; AVX512VL-NEXT: vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15] -; AVX512VL-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] ; AVX512VL-NEXT: vpmullw %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: vpor %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15] @@ -1945,9 +1938,9 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 ; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm1, %zmm2 -; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1] +; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = ; AVX512BW-NEXT: vpsllvw %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512BW-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15] @@ -1958,9 +1951,9 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin ; AVX512VBMI2: # %bb.0: ; AVX512VBMI2-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1 ; AVX512VBMI2-NEXT: # 
kill: def $ymm0 killed $ymm0 def $zmm0 -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = ; AVX512VBMI2-NEXT: vpsrlvw %zmm2, %zmm1, %zmm2 -; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm3 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1] +; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm3 = ; AVX512VBMI2-NEXT: vpsllvw %zmm3, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512VBMI2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15] @@ -2000,8 +1993,6 @@ define <16 x i16> @constant_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y) nounwin ; XOPAVX2: # %bb.0: ; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm2 = ; XOPAVX2-NEXT: vpmulhuw %ymm2, %ymm1, %ymm3 -; XOPAVX2-NEXT: vpblendw {{.*#+}} ymm4 = ymm1[0],ymm3[1,2,3,4,5,6,7],ymm1[8],ymm3[9,10,11,12,13,14,15] -; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7] ; XOPAVX2-NEXT: vpmullw %ymm2, %ymm0, %ymm0 ; XOPAVX2-NEXT: vpor %ymm3, %ymm0, %ymm0 ; XOPAVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15] @@ -2063,7 +2054,7 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX2: # %bb.0: ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm2 ; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX2-NEXT: # ymm3 = mem[0,1,0,1] ; AVX2-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpsllw $2, %ymm0, %ymm2 @@ -2090,7 +2081,7 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm2 ; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512F-NEXT: # ymm3 = mem[0,1,0,1] ; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm2 @@ -2117,7 +2108,7 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm2 ; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512VL-NEXT: # ymm3 = mem[0,1,0,1] ; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm2 @@ -2194,12 +2185,12 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; XOPAVX1-LABEL: constant_funnnel_v32i8: ; XOPAVX1: # %bb.0: ; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2 -; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,255,254,253,252,251,250,249,0,249,250,251,252,253,254,255] +; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm2, %xmm2 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm1, %xmm3 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 ; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3 -; XOPAVX1-NEXT: vmovdqa {{.*#+}} 
xmm4 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7] +; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm4 = ; XOPAVX1-NEXT: vpshlb %xmm4, %xmm3, %xmm3 ; XOPAVX1-NEXT: vpshlb %xmm4, %xmm0, %xmm0 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0 @@ -2211,12 +2202,12 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwind { ; XOPAVX2-LABEL: constant_funnnel_v32i8: ; XOPAVX2: # %bb.0: ; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 -; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,255,254,253,252,251,250,249,0,249,250,251,252,253,254,255] +; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm3 = ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm2, %xmm2 ; XOPAVX2-NEXT: vpshlb %xmm3, %xmm1, %xmm3 ; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2 ; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 -; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [8,7,6,5,4,3,2,1,8,1,2,3,4,5,6,7] +; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm4 = ; XOPAVX2-NEXT: vpshlb %xmm4, %xmm3, %xmm3 ; XOPAVX2-NEXT: vpshlb %xmm4, %xmm0, %xmm0 ; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/vector-fshr-512.ll b/llvm/test/CodeGen/X86/vector-fshr-512.ll index f2b31d4..26dbc92 100644 --- a/llvm/test/CodeGen/X86/vector-fshr-512.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-512.ll @@ -1034,15 +1034,11 @@ define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwin ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = ; AVX512F-NEXT: vpmulhuw %ymm4, %ymm2, %ymm5 -; AVX512F-NEXT: vpblendw {{.*#+}} ymm6 = ymm2[0],ymm5[1,2,3,4,5,6,7],ymm2[8],ymm5[9,10,11,12,13,14,15] -; AVX512F-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7] ; AVX512F-NEXT: vpmullw %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm5, %ymm0, %ymm0 ; AVX512F-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15] ; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] ; AVX512F-NEXT: vpmulhuw %ymm4, %ymm3, %ymm2 -; AVX512F-NEXT: vpblendw {{.*#+}} ymm5 = ymm3[0],ymm2[1,2,3,4,5,6,7],ymm3[8],ymm2[9,10,11,12,13,14,15] -; AVX512F-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7] ; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1 ; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm1[1,2,3,4,5,6,7],ymm3[8],ymm1[9,10,11,12,13,14,15] @@ -1053,15 +1049,11 @@ define <32 x i16> @constant_funnnel_v32i16(<32 x i16> %x, <32 x i16> %y) nounwin ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = ; AVX512VL-NEXT: vpmulhuw %ymm4, %ymm2, %ymm5 -; AVX512VL-NEXT: vpblendw {{.*#+}} ymm6 = ymm2[0],ymm5[1,2,3,4,5,6,7],ymm2[8],ymm5[9,10,11,12,13,14,15] -; AVX512VL-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7] ; AVX512VL-NEXT: vpmullw %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpor %ymm5, %ymm0, %ymm0 ; AVX512VL-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15] ; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] ; AVX512VL-NEXT: vpmulhuw %ymm4, %ymm3, %ymm2 -; AVX512VL-NEXT: vpblendw {{.*#+}} ymm5 = ymm3[0],ymm2[1,2,3,4,5,6,7],ymm3[8],ymm2[9,10,11,12,13,14,15] -; AVX512VL-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7] ; AVX512VL-NEXT: vpmullw %ymm4, %ymm1, %ymm1 ; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0],ymm1[1,2,3,4,5,6,7],ymm3[8],ymm1[9,10,11,12,13,14,15] @@ -1109,7 +1101,7 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = 
[240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 -; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512F-NEXT: # ymm6 = mem[0,1,0,1] ; AVX512F-NEXT: vpblendvb %ymm6, %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm4 @@ -1159,7 +1151,7 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4 ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4 -; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1] ; AVX512VL-NEXT: vpblendvb %ymm6, %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm4 @@ -1209,7 +1201,7 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; ; AVX512BW-LABEL: constant_funnnel_v64i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512BW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512BW-NEXT: vpmovb2m %zmm2, %k1 ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm3 @@ -1240,7 +1232,7 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; ; AVX512VBMI2-LABEL: constant_funnnel_v64i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512VBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512VBMI2-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512VBMI2-NEXT: vpmovb2m %zmm2, %k1 ; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm3 @@ -1271,7 +1263,7 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; ; AVX512VLBW-LABEL: constant_funnnel_v64i8: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512VLBW-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = 
[57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512VLBW-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1 ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm3 @@ -1302,7 +1294,7 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwind { ; ; AVX512VLVBMI2-LABEL: constant_funnnel_v64i8: ; AVX512VLVBMI2: # %bb.0: -; AVX512VLVBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536,57600,41152,24704,8256,8448,24640,41088,57536] +; AVX512VLVBMI2-NEXT: vbroadcasti32x4 {{.*#+}} zmm2 = [57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536,57344,41152,24704,8256,8192,24640,41088,57536] ; AVX512VLVBMI2-NEXT: # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; AVX512VLVBMI2-NEXT: vpmovb2m %zmm2, %k1 ; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm3 -- 2.7.4
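Illustrative sketch (editorial addition, not part of the applied patch): every constant_funnnel_* delta above comes from a funnel shift whose constant per-lane amount is zero in lane 0 (modulo the element width, e.g. 16 for the fshr i16 cases), so lane 0 of the expanded shift result is never demanded; the trailing vpblendw/pblendvb takes that lane straight from the pass-through operand. A minimal IR sketch of the pattern in the style of these tests; the function name here is hypothetical:

define <8 x i16> @fshl_lane0_passthrough(<8 x i16> %x, <8 x i16> %y) nounwind {
  ; Lane 0 shifts by 0, so lane 0 of the result is just lane 0 of %x.
  ; With vector shifts handled by SimplifyDemandedVectorElts, lane 0 of the
  ; expanded shift operands is known undemanded, which is why the constant-pool
  ; comments above change from [1,2,4,...] to <u,2,4,...>.
  %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)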