From 700e4a1ab87f1a04cfe0733ac0b67149eaad0d30 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim <llvm-dev@redking.me.uk> Date: Thu, 14 Jul 2016 12:21:40 +0000 Subject: [PATCH] [X86][AVX] Add 128-bit wide shuffle tests that should combine to blend-with-zero llvm-svn: 275402 --- .../CodeGen/X86/vector-shuffle-combining-avx.ll | 26 ++++++++++++++++++++++ .../CodeGen/X86/vector-shuffle-combining-avx2.ll | 12 ++++++++++ 2 files changed, 38 insertions(+) diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll index 351c91e..2dc50cf 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll @@ -123,6 +123,32 @@ define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) { ret <8 x float> %3 } +define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) { +; AVX1-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd: +; AVX1: # BB#0: +; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2] +; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2] +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd: +; AVX2: # BB#0: +; AVX2-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2] +; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd: +; AVX512F: # BB#0: +; AVX512F-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2] +; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; AVX512F-NEXT: retq + %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>) + %2 = shufflevector <4 x double> %1, <4 x double> 
zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5> + %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>) + ret <4 x double> %3 +} + define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) { ; ALL-LABEL: combine_vpermilvar_8f32_movddup: ; ALL: # BB#0: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll index b087fc4..97492dd 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll @@ -62,6 +62,18 @@ define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) { ret <4 x i64> %5 } +define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) { +; CHECK-LABEL: combine_permq_pshufb_as_vpblendd: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5] +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; CHECK-NEXT: retq + %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2> + %2 = bitcast <4 x i64> %1 to <32 x i8> + %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255>) + ret <32 x i8> %3 +} + define <16 x i8> @combine_pshufb_as_vpbroadcastb128(<16 x i8> %a) { ; CHECK-LABEL: combine_pshufb_as_vpbroadcastb128: ; CHECK: # BB#0: -- 2.7.4