From b0afc69435659c417ba5e0af5d2c211a06bc9679 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Tue, 5 Feb 2019 19:15:48 +0000 Subject: [PATCH] [X86][SSE] Disable ZERO_EXTEND shuffle combining rL352997 enabled ZERO_EXTEND from non-shuffle-able value types. I've disabled it for now to fix a regression identified by @asbirlea until I can fix this properly. llvm-svn: 353198 --- llvm/lib/Target/X86/X86ISelLowering.cpp | 4 ++-- llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll | 27 +++++++--------------- .../CodeGen/X86/vector-shuffle-combining-avx2.ll | 26 +++++++++++++++++++++ 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 2cfc931..5d40e89 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -6793,8 +6793,8 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask, Mask.append(NumElts, 0); return true; } - case ISD::ZERO_EXTEND_VECTOR_INREG: - case ISD::ZERO_EXTEND: { + case ISD::ZERO_EXTEND_VECTOR_INREG: { + // TODO: Handle ISD::ZERO_EXTEND SDValue Src = N.getOperand(0); MVT SrcVT = Src.getSimpleValueType(); unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits(); diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll index 8d13670..651cb73 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll @@ -1526,8 +1526,9 @@ define <8 x i32> @shuffle_v8i32_08192a3b(<8 x i32> %a, <8 x i32> %b) { ; ; AVX512VL-LABEL: shuffle_v8i32_08192a3b: ; AVX512VL: # %bb.0: -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,8,1,9,2,10,3,11] -; AVX512VL-NEXT: vpermt2d %ymm1, %ymm2, %ymm0 +; AVX512VL-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm0 = [0,8,2,9,4,10,6,11] +; AVX512VL-NEXT: vpermi2d %ymm1, %ymm2, %ymm0 ; AVX512VL-NEXT: retq %shuffle = 
shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11> ret <8 x i32> %shuffle @@ -1571,23 +1572,11 @@ define <8 x i32> @shuffle_v8i32_091b2d3f(<8 x i32> %a, <8 x i32> %b) { ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] ; AVX1-NEXT: retq ; -; AVX2-LABEL: shuffle_v8i32_091b2d3f: -; AVX2: # %bb.0: -; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] -; AVX2-NEXT: retq -; -; AVX512VL-SLOW-LABEL: shuffle_v8i32_091b2d3f: -; AVX512VL-SLOW: # %bb.0: -; AVX512VL-SLOW-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] -; AVX512VL-SLOW-NEXT: retq -; -; AVX512VL-FAST-LABEL: shuffle_v8i32_091b2d3f: -; AVX512VL-FAST: # %bb.0: -; AVX512VL-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,9,1,11,2,13,3,15] -; AVX512VL-FAST-NEXT: vpermt2d %ymm1, %ymm2, %ymm0 -; AVX512VL-FAST-NEXT: retq +; AVX2OR512VL-LABEL: shuffle_v8i32_091b2d3f: +; AVX2OR512VL: # %bb.0: +; AVX2OR512VL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX2OR512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7] +; AVX2OR512VL-NEXT: retq %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15> ret <8 x i32> %shuffle } diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll index ff9a621..963fb98 100- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll @@ -760,3 +760,29 @@ entry: %shuf2 = shufflevector <8 x float> %inp1, <8 x float> %shuf1, <8 x i32> ret <8 x float> %shuf2 } + +define void @packss_zext_v8i1() { +; X86-LABEL: packss_zext_v8i1: +; X86: # %bb.0: +; 
X86-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X86-NEXT: vmovups %ymm0, (%eax) +; X86-NEXT: vzeroupper +; X86-NEXT: retl +; +; X64-LABEL: packss_zext_v8i1: +; X64: # %bb.0: +; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-NEXT: vmovups %ymm0, (%rax) +; X64-NEXT: vzeroupper +; X64-NEXT: retq + %tmp0 = icmp sgt <8 x i32> undef, undef + %tmp1 = zext <8 x i1> %tmp0 to <8 x i32> + %tmp2 = shufflevector <8 x i32> %tmp1, <8 x i32> zeroinitializer, <16 x i32> + %tmp3 = trunc <16 x i32> %tmp2 to <16 x i16> + %tmp4 = add <16 x i16> zeroinitializer, %tmp3 + %tmp6 = sext <16 x i16> %tmp4 to <16 x i32> + %tmp10 = shufflevector <16 x i32> %tmp6, <16 x i32> undef, <8 x i32> + %tmp11 = tail call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> undef, <8 x i32> %tmp10) + store <16 x i16> %tmp11, <16 x i16>* undef, align 2 + ret void +} -- 2.7.4