From bce1f6b49196e174f299e577a42151e2adbf49d8 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev@redking.me.uk>
Date: Sun, 2 Oct 2016 20:43:02 +0000
Subject: [PATCH] [X86][AVX2] Missed opportunities to combine to VPERMD/VPERMPS

llvm-svn: 283077
---
 .../CodeGen/X86/vector-shuffle-combining-avx2.ll   | 48 ++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index bea9c4a..72eff32 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -89,6 +89,54 @@ define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) {
   ret <4 x i64> %5
 }
 
+define <8 x i32> @combine_as_vpermd(<8 x i32> %a0) {
+; X32-LABEL: combine_as_vpermd:
+; X32:       # BB#0:
+; X32-NEXT:    vmovdqa {{.*#+}} ymm1 = <4,u,u,5,u,u,0,7>
+; X32-NEXT:    vpermd %ymm0, %ymm1, %ymm1
+; X32-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; X32-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6,7]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_as_vpermd:
+; X64:       # BB#0:
+; X64-NEXT:    vmovdqa {{.*#+}} ymm1 = <4,u,u,5,u,u,0,7>
+; X64-NEXT:    vpermd %ymm0, %ymm1, %ymm1
+; X64-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
+; X64-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6,7]
+; X64-NEXT:    retq
+  %1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
+  %2 = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> <i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6>)
+  %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 0, i32 8, i32 9, i32 1, i32 15, i32 14, i32 4, i32 3>
+  ret <8 x i32> %3
+}
+
+define <8 x float> @combine_as_vpermps(<8 x float> %a0) {
+; X32-LABEL: combine_as_vpermps:
+; X32:       # BB#0:
+; X32-NEXT:    vpermilps {{.*#+}} ymm1 = ymm0[1,0,3,2,5,4,7,6]
+; X32-NEXT:    vmovaps {{.*#+}} ymm2 = <u,1,u,0,u,u,5,4>
+; X32-NEXT:    vpermps %ymm0, %ymm2, %ymm0
+; X32-NEXT:    vmovaps {{.*#+}} ymm2 = <7,u,6,u,0,1,u,u>
+; X32-NEXT:    vpermps %ymm1, %ymm2, %ymm1
+; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
+; X32-NEXT:    retl
+;
+; X64-LABEL: combine_as_vpermps:
+; X64:       # BB#0:
+; X64-NEXT:    vpermilps {{.*#+}} ymm1 = ymm0[1,0,3,2,5,4,7,6]
+; X64-NEXT:    vmovaps {{.*#+}} ymm2 = <u,1,u,0,u,u,5,4>
+; X64-NEXT:    vpermps %ymm0, %ymm2, %ymm0
+; X64-NEXT:    vmovaps {{.*#+}} ymm2 = <7,u,6,u,0,1,u,u>
+; X64-NEXT:    vpermps %ymm1, %ymm2, %ymm1
+; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6,7]
+; X64-NEXT:    retq
+  %1 = shufflevector <8 x float> %a0, <8 x float> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+  %2 = tail call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>)
+  %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 15, i32 0, i32 14, i32 1, i32 8, i32 9, i32 4, i32 5>
+  ret <8 x float> %3
+}
+
 define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) {
 ; X32-LABEL: combine_permq_pshufb_as_vpblendd:
 ; X32:       # BB#0:
-- 
2.7.4