From 1e4d8709992da36e46dca5b214a6810a0380948e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Thu, 1 Dec 2016 15:41:40 +0000
Subject: [PATCH] [X86][SSE] Add support for combining AND bitmasks to shuffles.

llvm-svn: 288365
---
 llvm/lib/Target/X86/X86ISelLowering.cpp            |  11 +++
 .../CodeGen/X86/clear_upper_vector_element_bits.ll |  18 +++-
 llvm/test/CodeGen/X86/known-bits.ll                |   6 +-
 llvm/test/CodeGen/X86/vec_int_to_fp.ll             |   9 +-
 llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll   | 102 ++++++++++++++-------
 .../CodeGen/X86/vector-shuffle-combining-avx2.ll   |   6 +-
 .../CodeGen/X86/vector-shuffle-combining-ssse3.ll  |   3 +-
 7 files changed, 102 insertions(+), 53 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 661d281..85766c9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -29889,6 +29889,17 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
   SDValue N1 = N->getOperand(1);
   SDLoc DL(N);
 
+  // Attempt to recursively combine a bitmask AND with shuffles.
+  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
+    SDValue Op(N, 0);
+    SmallVector<int, 1> NonceMask; // Just a placeholder.
+    NonceMask.push_back(0);
+    if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask,
+                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
+                                      DCI, Subtarget))
+      return SDValue(); // This routine will use CombineTo to replace N.
+  }
+
   // Create BEXTR instructions
   // BEXTR is ((X >> imm) & (2**size-1))
   if (VT != MVT::i32 && VT != MVT::i64)
diff --git a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index c50654a..150061a 100644
--- a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -13,10 +13,17 @@ define <2 x i64> @_clearupper2xi64a(<2 x i64>) nounwind {
 ; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: _clearupper2xi64a:
-; AVX: # BB#0:
-; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: _clearupper2xi64a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper2xi64a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: retq
   %x0 = extractelement <2 x i64> %0, i32 0
   %x1 = extractelement <2 x i64> %0, i32 1
   %trunc0 = trunc i64 %x0 to i32
@@ -36,7 +43,8 @@ define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
 ;
 ; AVX1-LABEL: _clearupper4xi32a:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: _clearupper4xi32a:
diff --git a/llvm/test/CodeGen/X86/known-bits.ll b/llvm/test/CodeGen/X86/known-bits.ll
index 9207c5d6..46451f2 100644
--- a/llvm/test/CodeGen/X86/known-bits.ll
+++ b/llvm/test/CodeGen/X86/known-bits.ll
@@ -16,8 +16,7 @@ define void @knownbits_zext_in_reg(i8*) nounwind {
 ; X32-NEXT: shrl $14, %eax
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: vmovd %eax, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X32-NEXT: vpextrd $1, %xmm0, %ebp
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: vmovd %xmm0, %esi
@@ -55,8 +54,7 @@ define void @knownbits_zext_in_reg(i8*) nounwind {
 ; X64-NEXT: shrl $14, %eax
 ; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: vmovd %eax, %xmm0
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT: vpextrd $1, %xmm0, %r8d
 ; X64-NEXT: xorl %esi, %esi
 ; X64-NEXT: vmovd %xmm0, %r9d
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
index 0ac2218..9517454 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -573,7 +573,8 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
 ;
 ; AVX1-LABEL: uitofp_4i32_to_2f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -873,7 +874,8 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
 ;
 ; AVX1-LABEL: uitofp_4i32_to_4f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -3256,7 +3258,8 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
 ; AVX1-LABEL: uitofp_load_4i32_to_4f64:
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index 8f0e8b2..78b799c 100644
--- a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -11,11 +11,11 @@
 ; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl \
 ; RUN:   | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
 
-; CST: [[MASKCSTADDR:.LCPI[0-9_]+]]:
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
+; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]:
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
 
 ; CST: [[FPMASKCSTADDR:.LCPI[0-9_]+]]:
 ; CST-NEXT: .long 1199570944 # float 65536
@@ -30,20 +30,32 @@
 ; AVX2-NEXT: .long 65535 # 0xffff
 
 define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE: # BB#0:
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
-; SSE-NEXT: addps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE2-NEXT: addps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE41-NEXT: addps %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: test_uitofp_v4i32_to_v4f32:
 ; AVX: # BB#0:
-; AVX-NEXT: vandps [[MASKCSTADDR]](%rip), %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -97,25 +109,45 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
 ; AVX2-NEXT: .long 65535 # 0xffff
 
 define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: addps %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: addps %xmm2, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: addps %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: addps %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: addps %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: addps %xmm2, %xmm1
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: test_uitofp_v8i32_to_v8f32:
 ; AVX: # BB#0:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 68fceef..d34bbb6 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -87,14 +87,12 @@ define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
 define <32 x i8> @combine_pshufb_and(<32 x i8> %a0) {
 ; X32-LABEL: combine_pshufb_and:
 ; X32: # BB#0:
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[24,25],zero,zero,zero,zero,zero,zero
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_pshufb_and:
 ; X64: # BB#0:
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[24,25],zero,zero,zero,zero,zero,zero
-; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
   %1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> )
   %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> 
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index f38373b..7e29a48 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -299,8 +299,7 @@ define <16 x i8> @combine_and_pshufb(<16 x i8> %a0) {
 define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
 ; SSSE3-LABEL: combine_pshufb_and:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[8,9],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: combine_pshufb_and:
-- 
2.7.4
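
Illustrative note (not part of the original commit): the combine added in combineAnd hands a vector AND whose constant mask zeroes whole bytes to combineX86ShufflesRecursively, so it can be selected as a shuffle or blend instead of a PAND with a constant-pool load. A minimal IR sketch of that pattern, with a hypothetical function name, mirroring the per-lane 0xffff masks exercised by the uitofp tests above:

    ; Each 32-bit lane keeps only its low 16 bits, so the mask clears whole bytes.
    define <4 x i32> @clear_upper16(<4 x i32> %x) {
      %m = and <4 x i32> %x, <i32 65535, i32 65535, i32 65535, i32 65535>
      ret <4 x i32> %m
    }

On SSE4.1/AVX targets this kind of AND can now be selected as a zeroed register plus PBLENDW, as the updated vec_int_to_fp.ll and vec_uint_to_fp-fastmath.ll checks show, rather than materialising the 65535 mask from memory.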