From 0242cead2c10b3cdbefc197aaafc87cd7cce0b7c Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev@redking.me.uk>
Date: Mon, 7 Aug 2017 16:49:09 +0000
Subject: [PATCH] [X86][AVX] Add full test coverage of subvector_broadcasts
 from registers

X86SubVBroadcast is intended for subvector broadcasts from memory, but we
must test that it handles all of these cases without the load as well, just
in case.

This was noticed while I was triaging the test cases from PR34041.

llvm-svn: 310268
---
 llvm/test/CodeGen/X86/subvector-broadcast.ll | 648 +++++++++++++++++++++++++++
 1 file changed, 648 insertions(+)

diff --git a/llvm/test/CodeGen/X86/subvector-broadcast.ll b/llvm/test/CodeGen/X86/subvector-broadcast.ll
index d826509..314cc09 100644
--- a/llvm/test/CodeGen/X86/subvector-broadcast.ll
+++ b/llvm/test/CodeGen/X86/subvector-broadcast.ll
@@ -1280,3 +1280,651 @@ entry:
   store <8 x double> %2, <8 x double>* @gb2, align 8
   ret void
 }
+
+;
+; Subvector Broadcast from register
+;
+
+define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
+; X32-LABEL: reg_broadcast_2f64_4f64:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: reg_broadcast_2f64_4f64:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+  %1 = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+  ret <4 x double> %1
+}
+
+define <8 x double> @reg_broadcast_2f64_8f64(<2 x double> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_2f64_8f64:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512-LABEL: reg_broadcast_2f64_8f64:
+; X32-AVX512: # BB#0:
+; X32-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_2f64_8f64:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512-LABEL: reg_broadcast_2f64_8f64:
+; X64-AVX512: # BB#0:
+; X64-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512-NEXT: retq
+  %1 = shufflevector <2 x double> %a0, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+  ret <8 x double> %1
+}
+
+define <8 x double> @reg_broadcast_4f64_8f64(<4 x double> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_4f64_8f64:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512-LABEL: reg_broadcast_4f64_8f64:
+; X32-AVX512: # BB#0:
+; X32-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_4f64_8f64:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512-LABEL: reg_broadcast_4f64_8f64:
+; X64-AVX512: # BB#0:
+; X64-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512-NEXT: retq
+  %1 = shufflevector <4 x double> %a0, <4 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  ret <8 x double> %1
+}
+
+define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
+; X32-LABEL: reg_broadcast_2i64_4i64:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: reg_broadcast_2i64_4i64:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+  %1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+  ret <4 x i64> %1
+}
+
+define <8 x i64> @reg_broadcast_2i64_8i64(<2 x i64> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_2i64_8i64:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512-LABEL: reg_broadcast_2i64_8i64:
+; X32-AVX512: # BB#0:
+; X32-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_2i64_8i64:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512-LABEL: reg_broadcast_2i64_8i64:
+; X64-AVX512: # BB#0:
+; X64-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512-NEXT: retq
+  %1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
+  ret <8 x i64> %1
+}
+
+define <8 x i64> @reg_broadcast_4i64_8i64(<4 x i64> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_4i64_8i64:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512-LABEL: reg_broadcast_4i64_8i64:
+; X32-AVX512: # BB#0:
+; X32-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_4i64_8i64:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512-LABEL: reg_broadcast_4i64_8i64:
+; X64-AVX512: # BB#0:
+; X64-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512-NEXT: retq
+  %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  ret <8 x i64> %1
+}
+
+define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
+; X32-LABEL: reg_broadcast_4f32_8f32:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: reg_broadcast_4f32_8f32:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+  %1 = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  ret <8 x float> %1
+}
+
+define <16 x float> @reg_broadcast_4f32_16f32(<4 x float> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_4f32_16f32:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_4f32_16f32:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_4f32_16f32:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_4f32_16f32:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_4f32_16f32:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_4f32_16f32:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_4f32_16f32:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_4f32_16f32:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <4 x float> %a0, <4 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  ret <16 x float> %1
+}
+
+define <16 x float> @reg_broadcast_8f32_16f32(<8 x float> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_8f32_16f32:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_8f32_16f32:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_8f32_16f32:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_8f32_16f32:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_8f32_16f32:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_8f32_16f32:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_8f32_16f32:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_8f32_16f32:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <8 x float> %a0, <8 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <16 x float> %1
+}
+
+define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
+; X32-LABEL: reg_broadcast_4i32_8i32:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: reg_broadcast_4i32_8i32:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  ret <8 x i32> %1
+}
+
+define <16 x i32> @reg_broadcast_4i32_16i32(<4 x i32> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_4i32_16i32:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_4i32_16i32:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_4i32_16i32:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_4i32_16i32:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_4i32_16i32:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_4i32_16i32:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_4i32_16i32:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_4i32_16i32:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
+  ret <16 x i32> %1
+}
+
+define <16 x i32> @reg_broadcast_8i32_16i32(<8 x i32> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_8i32_16i32:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_8i32_16i32:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_8i32_16i32:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_8i32_16i32:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_8i32_16i32:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_8i32_16i32:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_8i32_16i32:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_8i32_16i32:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512DQ-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <16 x i32> %1
+}
+
+define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
+; X32-LABEL: reg_broadcast_8i16_16i16:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: reg_broadcast_8i16_16i16:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+  %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <16 x i16> %1
+}
+
+define <32 x i16> @reg_broadcast_8i16_32i16(<8 x i16> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_8i16_32i16:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_8i16_32i16:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_8i16_32i16:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_8i16_32i16:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <32 x i16> %1
+}
+
+define <32 x i16> @reg_broadcast_16i16_32i16(<16 x i16> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_16i16_32i16:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_16i16_32i16:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_16i16_32i16:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_16i16_32i16:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_16i16_32i16:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_16i16_32i16:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <32 x i16> %1
+}
+
+define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
+; X32-LABEL: reg_broadcast_16i8_32i8:
+; X32: # BB#0:
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: reg_broadcast_16i8_32i8:
+; X64: # BB#0:
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-NEXT: retq
+  %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <32 x i8> %1
+}
+
+define <64 x i8> @reg_broadcast_16i8_64i8(<16 x i8> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_16i8_64i8:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_16i8_64i8:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_16i8_64i8:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_16i8_64i8:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <64 x i8> %1
+}
+
+define <64 x i8> @reg_broadcast_32i8_64i8(<32 x i8> %a0) nounwind {
+; X32-AVX-LABEL: reg_broadcast_32i8_64i8:
+; X32-AVX: # BB#0:
+; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX-NEXT: retl
+;
+; X32-AVX512F-LABEL: reg_broadcast_32i8_64i8:
+; X32-AVX512F: # BB#0:
+; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512F-NEXT: retl
+;
+; X32-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
+; X32-AVX512BW: # BB#0:
+; X32-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X32-AVX512BW-NEXT: retl
+;
+; X32-AVX512DQ-LABEL: reg_broadcast_32i8_64i8:
+; X32-AVX512DQ: # BB#0:
+; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X32-AVX512DQ-NEXT: retl
+;
+; X64-AVX-LABEL: reg_broadcast_32i8_64i8:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: reg_broadcast_32i8_64i8:
+; X64-AVX512F: # BB#0:
+; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
+; X64-AVX512BW: # BB#0:
+; X64-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
+; X64-AVX512BW-NEXT: retq
+;
+; X64-AVX512DQ-LABEL: reg_broadcast_32i8_64i8:
+; X64-AVX512DQ: # BB#0:
+; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
+; X64-AVX512DQ-NEXT: retq
+  %1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  ret <64 x i8> %1
+}
--
2.7.4