From 9afec8890743bd529d179ee308da5ba91e0aa999 Mon Sep 17 00:00:00 2001 From: Roman Lebedev Date: Sun, 3 Oct 2021 17:50:51 +0300 Subject: [PATCH] [NFC][X86][Codegen] Add test coverage for interleaved i64 load/store stride=3 --- .../X86/vector-interleaved-load-i64-stride-3.ll | 653 ++++++++++++++++++++ .../X86/vector-interleaved-store-i64-stride-3.ll | 654 +++++++++++++++++++++ 2 files changed, 1307 insertions(+) create mode 100644 llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll create mode 100644 llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll new file mode 100644 index 0000000..b2fee11 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-3.ll @@ -0,0 +1,653 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX1 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512 + +; These patterns are produced by LoopVectorizer for interleaved loads. + +define void @load_i64_stride3_vf2(<6 x i64>* %in.vec, <2 x i64>* %out.vec0, <2 x i64>* %out.vec1, <2 x i64>* %out.vec2) nounwind { +; SSE-LABEL: load_i64_stride3_vf2: +; SSE: # %bb.0: +; SSE-NEXT: movapd (%rdi), %xmm0 +; SSE-NEXT: movapd 16(%rdi), %xmm1 +; SSE-NEXT: movapd 32(%rdi), %xmm2 +; SSE-NEXT: movapd %xmm1, %xmm3 +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm0[0],xmm3[1] +; SSE-NEXT: shufpd {{.*#+}} xmm0 = xmm0[1],xmm2[0] +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1] +; SSE-NEXT: movapd %xmm3, (%rsi) +; SSE-NEXT: movapd %xmm0, (%rdx) +; SSE-NEXT: movapd %xmm2, (%rcx) +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i64_stride3_vf2: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vmovdqa 32(%rdi), %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7] +; AVX1-NEXT: vmovdqa %xmm3, (%rsi) +; AVX1-NEXT: vmovdqa %xmm0, (%rdx) +; AVX1-NEXT: vmovdqa %xmm1, (%rcx) +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i64_stride3_vf2: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX2-NEXT: vmovdqa 32(%rdi), %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3] +; AVX2-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] +; AVX2-NEXT: vmovdqa %xmm3, (%rsi) +; AVX2-NEXT: vmovdqa %xmm0, (%rdx) +; AVX2-NEXT: vmovdqa %xmm1, (%rcx) +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i64_stride3_vf2: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa (%rdi), %xmm0 +; AVX512-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX512-NEXT: vmovdqa 32(%rdi), %xmm2 +; AVX512-NEXT: vpblendd {{.*#+}} xmm3 = xmm0[0,1],xmm1[2,3] +; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = 
xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7] +; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3] +; AVX512-NEXT: vmovdqa %xmm3, (%rsi) +; AVX512-NEXT: vmovdqa %xmm0, (%rdx) +; AVX512-NEXT: vmovdqa %xmm1, (%rcx) +; AVX512-NEXT: retq + %wide.vec = load <6 x i64>, <6 x i64>* %in.vec, align 32 + + %strided.vec0 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> + %strided.vec1 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> + %strided.vec2 = shufflevector <6 x i64> %wide.vec, <6 x i64> poison, <2 x i32> + + store <2 x i64> %strided.vec0, <2 x i64>* %out.vec0, align 32 + store <2 x i64> %strided.vec1, <2 x i64>* %out.vec1, align 32 + store <2 x i64> %strided.vec2, <2 x i64>* %out.vec2, align 32 + + ret void +} + +define void @load_i64_stride3_vf4(<12 x i64>* %in.vec, <4 x i64>* %out.vec0, <4 x i64>* %out.vec1, <4 x i64>* %out.vec2) nounwind { +; SSE-LABEL: load_i64_stride3_vf4: +; SSE: # %bb.0: +; SSE-NEXT: movapd 80(%rdi), %xmm0 +; SSE-NEXT: movapd (%rdi), %xmm1 +; SSE-NEXT: movapd 16(%rdi), %xmm2 +; SSE-NEXT: movapd 32(%rdi), %xmm3 +; SSE-NEXT: movapd 48(%rdi), %xmm4 +; SSE-NEXT: movapd 64(%rdi), %xmm5 +; SSE-NEXT: movapd %xmm5, %xmm6 +; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm4[0],xmm6[1] +; SSE-NEXT: movapd %xmm2, %xmm7 +; SSE-NEXT: movsd {{.*#+}} xmm7 = xmm1[0],xmm7[1] +; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm0[0] +; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm3[0] +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm5[0],xmm0[1] +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1] +; SSE-NEXT: movapd %xmm6, 16(%rsi) +; SSE-NEXT: movapd %xmm7, (%rsi) +; SSE-NEXT: movapd %xmm4, 16(%rdx) +; SSE-NEXT: movapd %xmm1, (%rdx) +; SSE-NEXT: movapd %xmm0, 16(%rcx) +; SSE-NEXT: movapd %xmm3, (%rcx) +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i64_stride3_vf4: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovapd 32(%rdi), %ymm0 +; AVX1-NEXT: vmovapd (%rdi), %ymm1 +; AVX1-NEXT: vmovapd 16(%rdi), %xmm2 +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2],ymm1[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm2[1],ymm3[2,3] +; AVX1-NEXT: vmovaps 64(%rdi), %xmm4 +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm5 +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm0[0],ymm1[3],ymm0[2] +; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm5 +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3] +; AVX1-NEXT: vmovapd %ymm3, (%rsi) +; AVX1-NEXT: vmovapd %ymm1, (%rdx) +; AVX1-NEXT: vmovapd %ymm0, (%rcx) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i64_stride3_vf4: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 +; AVX2-NEXT: vmovdqa (%rdi), %ymm1 +; AVX2-NEXT: vinserti128 $1, 64(%rdi), %ymm0, %ymm2 +; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm1[0,3,2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] +; AVX2-NEXT: vpbroadcastq 80(%rdi), %ymm1 +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vmovaps 16(%rdi), %xmm1 +; AVX2-NEXT: vblendps {{.*#+}} xmm1 = 
xmm1[0,1],mem[2,3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7] +; AVX2-NEXT: vmovdqa %ymm2, (%rsi) +; AVX2-NEXT: vmovdqa %ymm0, (%rdx) +; AVX2-NEXT: vmovaps %ymm1, (%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i64_stride3_vf4: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqa 64(%rdi), %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [0,3,6,9] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [1,4,7,10] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,5,8,11] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqa %ymm2, (%rsi) +; AVX512-NEXT: vmovdqa %ymm3, (%rdx) +; AVX512-NEXT: vmovdqa %ymm4, (%rcx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %wide.vec = load <12 x i64>, <12 x i64>* %in.vec, align 32 + + %strided.vec0 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> + %strided.vec1 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> + %strided.vec2 = shufflevector <12 x i64> %wide.vec, <12 x i64> poison, <4 x i32> + + store <4 x i64> %strided.vec0, <4 x i64>* %out.vec0, align 32 + store <4 x i64> %strided.vec1, <4 x i64>* %out.vec1, align 32 + store <4 x i64> %strided.vec2, <4 x i64>* %out.vec2, align 32 + + ret void +} + +define void @load_i64_stride3_vf8(<24 x i64>* %in.vec, <8 x i64>* %out.vec0, <8 x i64>* %out.vec1, <8 x i64>* %out.vec2) nounwind { +; SSE-LABEL: load_i64_stride3_vf8: +; SSE: # %bb.0: +; SSE-NEXT: movapd 128(%rdi), %xmm14 +; SSE-NEXT: movapd 176(%rdi), %xmm13 +; SSE-NEXT: movapd 80(%rdi), %xmm12 +; SSE-NEXT: movapd 96(%rdi), %xmm4 +; SSE-NEXT: movapd 112(%rdi), %xmm8 +; SSE-NEXT: movapd 144(%rdi), %xmm3 +; SSE-NEXT: movapd 160(%rdi), %xmm9 +; SSE-NEXT: movapd (%rdi), %xmm6 +; SSE-NEXT: movapd 16(%rdi), %xmm10 +; SSE-NEXT: movapd 32(%rdi), %xmm5 +; SSE-NEXT: movapd 48(%rdi), %xmm7 +; SSE-NEXT: movapd 64(%rdi), %xmm11 +; SSE-NEXT: movapd %xmm11, %xmm15 +; SSE-NEXT: movsd {{.*#+}} xmm15 = xmm7[0],xmm15[1] +; SSE-NEXT: movapd %xmm9, %xmm1 +; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1] +; SSE-NEXT: movapd %xmm8, %xmm2 +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1] +; SSE-NEXT: movapd %xmm10, %xmm0 +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm6[0],xmm0[1] +; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm12[0] +; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1],xmm13[0] +; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm14[0] +; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1],xmm5[0] +; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm11[0],xmm12[1] +; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm9[0],xmm13[1] +; SSE-NEXT: movsd {{.*#+}} xmm14 = xmm8[0],xmm14[1] +; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm10[0],xmm5[1] +; SSE-NEXT: movapd %xmm2, 32(%rsi) +; SSE-NEXT: movapd %xmm0, (%rsi) +; SSE-NEXT: movapd %xmm1, 48(%rsi) +; SSE-NEXT: movapd %xmm15, 16(%rsi) +; SSE-NEXT: movapd %xmm4, 32(%rdx) +; SSE-NEXT: movapd %xmm6, (%rdx) +; SSE-NEXT: movapd %xmm3, 48(%rdx) +; SSE-NEXT: movapd %xmm7, 16(%rdx) +; SSE-NEXT: movapd %xmm14, 32(%rcx) +; SSE-NEXT: movapd %xmm5, (%rcx) +; SSE-NEXT: movapd %xmm13, 48(%rcx) +; SSE-NEXT: movapd %xmm12, 16(%rcx) +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i64_stride3_vf8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovapd 32(%rdi), %ymm0 +; AVX1-NEXT: vmovapd (%rdi), %ymm1 +; AVX1-NEXT: vmovapd 128(%rdi), %ymm2 +; AVX1-NEXT: vmovapd 96(%rdi), %ymm3 +; AVX1-NEXT: vmovapd 112(%rdi), %xmm4 +; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3] 
+; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0],ymm4[1],ymm5[2,3] +; AVX1-NEXT: vmovaps 160(%rdi), %xmm6 +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm7 +; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm5[0,1,2],ymm7[3] +; AVX1-NEXT: vmovapd 16(%rdi), %xmm7 +; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0],ymm7[1],ymm8[2,3] +; AVX1-NEXT: vmovaps 64(%rdi), %xmm5 +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm10 +; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm0[0],ymm1[3],ymm0[2] +; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm10 +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm10[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[1],ymm2[0],ymm3[3],ymm2[2] +; AVX1-NEXT: vbroadcastsd 176(%rdi), %ymm10 +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm10[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2],ymm0[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3] +; AVX1-NEXT: vmovapd %ymm8, (%rsi) +; AVX1-NEXT: vmovapd %ymm9, 32(%rsi) +; AVX1-NEXT: vmovapd %ymm3, 32(%rdx) +; AVX1-NEXT: vmovapd %ymm1, (%rdx) +; AVX1-NEXT: vmovapd %ymm2, 32(%rcx) +; AVX1-NEXT: vmovapd %ymm0, (%rcx) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i64_stride3_vf8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 +; AVX2-NEXT: vmovdqa (%rdi), %ymm1 +; AVX2-NEXT: vmovdqa 128(%rdi), %ymm2 +; AVX2-NEXT: vmovdqa 96(%rdi), %ymm3 +; AVX2-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm4 +; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm3[0,3,2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm2[4,5],ymm5[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5],ymm4[6,7] +; AVX2-NEXT: vinserti128 $1, 64(%rdi), %ymm0, %ymm5 +; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm1[0,3,2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm0[4,5],ymm6[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4,5],ymm5[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm1[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23] +; AVX2-NEXT: vpbroadcastq 80(%rdi), %ymm1 +; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm2[4,5,6,7] +; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm1[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23] +; AVX2-NEXT: vpbroadcastq 176(%rdi), %ymm2 +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vmovaps 16(%rdi), %xmm2 +; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = mem[0,1,0,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7] +; AVX2-NEXT: vmovaps 112(%rdi), %xmm3 +; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm3[0,1],mem[2,3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = mem[0,1,0,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7] +; AVX2-NEXT: vmovdqa %ymm5, (%rsi) +; AVX2-NEXT: vmovdqa %ymm4, 32(%rsi) +; AVX2-NEXT: vmovdqa %ymm1, 32(%rdx) +; AVX2-NEXT: vmovdqa %ymm0, (%rdx) +; 
AVX2-NEXT: vmovaps %ymm3, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm2, (%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i64_stride3_vf8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 +; AVX512-NEXT: vmovdqu64 128(%rdi), %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,3,6,9,12,15,u,u> +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,4,5,10,13] +; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <1,4,7,10,13,u,u,u> +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,8,11,14] +; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,13,0,3,6,u,u,u> +; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,4,9,12,15] +; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm0 +; AVX512-NEXT: vmovdqu64 %zmm4, (%rsi) +; AVX512-NEXT: vmovdqu64 %zmm5, (%rdx) +; AVX512-NEXT: vmovdqu64 %zmm0, (%rcx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %wide.vec = load <24 x i64>, <24 x i64>* %in.vec, align 32 + + %strided.vec0 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> + %strided.vec1 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> + %strided.vec2 = shufflevector <24 x i64> %wide.vec, <24 x i64> poison, <8 x i32> + + store <8 x i64> %strided.vec0, <8 x i64>* %out.vec0, align 32 + store <8 x i64> %strided.vec1, <8 x i64>* %out.vec1, align 32 + store <8 x i64> %strided.vec2, <8 x i64>* %out.vec2, align 32 + + ret void +} + +define void @load_i64_stride3_vf16(<48 x i64>* %in.vec, <16 x i64>* %out.vec0, <16 x i64>* %out.vec1, <16 x i64>* %out.vec2) nounwind { +; SSE-LABEL: load_i64_stride3_vf16: +; SSE: # %bb.0: +; SSE-NEXT: subq $24, %rsp +; SSE-NEXT: movaps 224(%rdi), %xmm0 +; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd 128(%rdi), %xmm0 +; SSE-NEXT: movapd 272(%rdi), %xmm6 +; SSE-NEXT: movapd 176(%rdi), %xmm5 +; SSE-NEXT: movapd 80(%rdi), %xmm1 +; SSE-NEXT: movapd 192(%rdi), %xmm7 +; SSE-NEXT: movapd 208(%rdi), %xmm11 +; SSE-NEXT: movapd 96(%rdi), %xmm8 +; SSE-NEXT: movapd 112(%rdi), %xmm12 +; SSE-NEXT: movapd 240(%rdi), %xmm4 +; SSE-NEXT: movapd 256(%rdi), %xmm13 +; SSE-NEXT: movapd 144(%rdi), %xmm15 +; SSE-NEXT: movapd 160(%rdi), %xmm9 +; SSE-NEXT: movapd 48(%rdi), %xmm14 +; SSE-NEXT: movapd 64(%rdi), %xmm3 +; SSE-NEXT: movapd %xmm3, %xmm2 +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm14[0],xmm2[1] +; SSE-NEXT: movapd %xmm2, (%rsp) # 16-byte Spill +; SSE-NEXT: shufpd {{.*#+}} xmm14 = xmm14[1],xmm1[0] +; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm3[0],xmm1[1] +; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm9, %xmm10 +; SSE-NEXT: movsd {{.*#+}} xmm10 = xmm15[0],xmm10[1] +; SSE-NEXT: shufpd {{.*#+}} xmm15 = xmm15[1],xmm5[0] +; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm9[0],xmm5[1] +; SSE-NEXT: movapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm12, %xmm9 +; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm8[0],xmm9[1] +; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm0[0] +; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm12[0],xmm0[1] +; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm13, %xmm12 +; SSE-NEXT: movsd {{.*#+}} xmm12 = xmm4[0],xmm12[1] +; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm6[0] +; SSE-NEXT: movapd %xmm4, 
{{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm13[0],xmm6[1] +; SSE-NEXT: movapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm11, %xmm13 +; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm7[0],xmm13[1] +; SSE-NEXT: movapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1],xmm0[0] +; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm11[0],xmm0[1] +; SSE-NEXT: movapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd 336(%rdi), %xmm11 +; SSE-NEXT: movapd 352(%rdi), %xmm1 +; SSE-NEXT: movapd %xmm1, %xmm8 +; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm11[0],xmm8[1] +; SSE-NEXT: movapd 368(%rdi), %xmm6 +; SSE-NEXT: shufpd {{.*#+}} xmm11 = xmm11[1],xmm6[0] +; SSE-NEXT: movsd {{.*#+}} xmm6 = xmm1[0],xmm6[1] +; SSE-NEXT: movapd 288(%rdi), %xmm1 +; SSE-NEXT: movapd 304(%rdi), %xmm4 +; SSE-NEXT: movapd %xmm4, %xmm2 +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm1[0],xmm2[1] +; SSE-NEXT: movapd 320(%rdi), %xmm0 +; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0] +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1] +; SSE-NEXT: movapd (%rdi), %xmm4 +; SSE-NEXT: movapd 16(%rdi), %xmm7 +; SSE-NEXT: movapd %xmm7, %xmm5 +; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm4[0],xmm5[1] +; SSE-NEXT: movapd 32(%rdi), %xmm3 +; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1],xmm3[0] +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm7[0],xmm3[1] +; SSE-NEXT: movapd %xmm2, 96(%rsi) +; SSE-NEXT: movapd %xmm13, 64(%rsi) +; SSE-NEXT: movapd %xmm9, 32(%rsi) +; SSE-NEXT: movapd %xmm5, (%rsi) +; SSE-NEXT: movapd %xmm8, 112(%rsi) +; SSE-NEXT: movapd %xmm12, 80(%rsi) +; SSE-NEXT: movapd %xmm10, 48(%rsi) +; SSE-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload +; SSE-NEXT: movaps %xmm2, 16(%rsi) +; SSE-NEXT: movapd %xmm1, 96(%rdx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; SSE-NEXT: movaps %xmm1, 64(%rdx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; SSE-NEXT: movaps %xmm1, 32(%rdx) +; SSE-NEXT: movapd %xmm4, (%rdx) +; SSE-NEXT: movapd %xmm11, 112(%rdx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; SSE-NEXT: movaps %xmm1, 80(%rdx) +; SSE-NEXT: movapd %xmm15, 48(%rdx) +; SSE-NEXT: movapd %xmm14, 16(%rdx) +; SSE-NEXT: movapd %xmm0, 96(%rcx) +; SSE-NEXT: movapd %xmm6, 112(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 64(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 80(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 32(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 48(%rcx) +; SSE-NEXT: movapd %xmm3, (%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 16(%rcx) +; SSE-NEXT: addq $24, %rsp +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i64_stride3_vf16: +; AVX1: # %bb.0: +; AVX1-NEXT: subq $72, %rsp +; AVX1-NEXT: vmovapd 320(%rdi), %ymm5 +; AVX1-NEXT: vmovapd 288(%rdi), %ymm12 +; AVX1-NEXT: vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vmovapd 32(%rdi), %ymm2 +; AVX1-NEXT: vmovapd (%rdi), %ymm9 +; AVX1-NEXT: vmovapd 128(%rdi), %ymm3 +; AVX1-NEXT: vmovapd 96(%rdi), %ymm14 +; AVX1-NEXT: vmovapd 112(%rdi), %xmm0 +; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm14[0,1],ymm3[2],ymm14[3] +; AVX1-NEXT: 
vblendpd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3] +; AVX1-NEXT: vmovaps 160(%rdi), %xmm10 +; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm4 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm4[3] +; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vmovapd 16(%rdi), %xmm8 +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm9[0,1],ymm2[2],ymm9[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3] +; AVX1-NEXT: vmovaps 64(%rdi), %xmm7 +; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm6 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm6[3] +; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill +; AVX1-NEXT: vmovapd 304(%rdi), %xmm11 +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm12[0,1],ymm5[2],ymm12[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3] +; AVX1-NEXT: vmovaps 352(%rdi), %xmm6 +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm12 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm12[3] +; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vmovapd 224(%rdi), %ymm13 +; AVX1-NEXT: vmovapd 192(%rdi), %ymm0 +; AVX1-NEXT: vmovapd 208(%rdi), %xmm4 +; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm13[2],ymm0[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0],ymm4[1],ymm12[2,3] +; AVX1-NEXT: vmovaps 256(%rdi), %xmm1 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm15 +; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm13[0],ymm0[3],ymm13[2] +; AVX1-NEXT: vbroadcastsd 272(%rdi), %ymm15 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3] +; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm3[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm14 = ymm14[1],ymm3[0],ymm14[3],ymm3[2] +; AVX1-NEXT: vbroadcastsd 176(%rdi), %ymm15 +; AVX1-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm2[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[1],ymm2[0],ymm9[3],ymm2[2] +; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm15 +; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm15[3] +; AVX1-NEXT: vblendpd $3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm15 # 32-byte Folded Reload +; AVX1-NEXT: # ymm15 = mem[0,1],ymm5[2,3] +; AVX1-NEXT: vshufpd {{.*#+}} ymm15 = ymm15[1],ymm5[0],ymm15[3],ymm5[2] +; AVX1-NEXT: vbroadcastsd 368(%rdi), %ymm0 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3] +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload +; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm15, %ymm10 +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm10[0],ymm3[1],ymm10[2],ymm3[3] +; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7 +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2],ymm2[3] +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm11, %ymm6 +; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],mem[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1 +; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm13[0,1],mem[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3] +; AVX1-NEXT: vmovapd %ymm12, 64(%rsi) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm4, 96(%rsi) +; AVX1-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm4, 
(%rsi) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm4, 32(%rsi) +; AVX1-NEXT: vmovapd %ymm0, 96(%rdx) +; AVX1-NEXT: vmovapd %ymm9, (%rdx) +; AVX1-NEXT: vmovapd %ymm14, 32(%rdx) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm0, 64(%rdx) +; AVX1-NEXT: vmovapd %ymm1, 64(%rcx) +; AVX1-NEXT: vmovapd %ymm5, 96(%rcx) +; AVX1-NEXT: vmovapd %ymm2, (%rcx) +; AVX1-NEXT: vmovapd %ymm3, 32(%rcx) +; AVX1-NEXT: addq $72, %rsp +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i64_stride3_vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa 224(%rdi), %ymm3 +; AVX2-NEXT: vmovdqa 192(%rdi), %ymm6 +; AVX2-NEXT: vmovdqa 320(%rdi), %ymm5 +; AVX2-NEXT: vmovdqa 288(%rdi), %ymm7 +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm8 +; AVX2-NEXT: vmovdqa (%rdi), %ymm9 +; AVX2-NEXT: vmovdqa 128(%rdi), %ymm10 +; AVX2-NEXT: vmovdqa 96(%rdi), %ymm11 +; AVX2-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm0 +; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm11[0,3,2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm10[4,5],ymm1[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm13 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; AVX2-NEXT: vinserti128 $1, 64(%rdi), %ymm0, %ymm1 +; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm9[0,3,2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm8[4,5],ymm2[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm14 = ymm2[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vinserti128 $1, 352(%rdi), %ymm0, %ymm2 +; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm7[0,3,2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm15 = ymm4[0,1,2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vinserti128 $1, 256(%rdi), %ymm0, %ymm4 +; AVX2-NEXT: vpermq {{.*#+}} ymm12 = ymm6[0,3,2,3] +; AVX2-NEXT: vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm3[4,5],ymm12[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm12[0,1,2,3,4,5],ymm4[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm3[4,5,6,7] +; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm6[8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23] +; AVX2-NEXT: vpbroadcastq 272(%rdi), %ymm6 +; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm6[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm10[4,5,6,7] +; AVX2-NEXT: vpalignr {{.*#+}} ymm6 = ymm6[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],ymm6[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23] +; AVX2-NEXT: vpbroadcastq 176(%rdi), %ymm10 +; AVX2-NEXT: vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm10[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm8[4,5,6,7] +; AVX2-NEXT: vpalignr {{.*#+}} ymm8 = ymm9[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm9[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23] +; AVX2-NEXT: vpbroadcastq 80(%rdi), %ymm9 +; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm9[6,7] +; AVX2-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm5[4,5,6,7] +; AVX2-NEXT: vpalignr {{.*#+}} ymm5 = ymm7[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm7[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23] +; AVX2-NEXT: vpbroadcastq 368(%rdi), %ymm7 +; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm7[6,7] +; AVX2-NEXT: vmovaps 112(%rdi), %xmm7 +; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],mem[2,3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7] +; AVX2-NEXT: vmovaps 16(%rdi), %xmm0 +; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3] +; 
AVX2-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7] +; AVX2-NEXT: vmovaps 304(%rdi), %xmm1 +; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7] +; AVX2-NEXT: vmovaps 208(%rdi), %xmm2 +; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = mem[0,1,0,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7] +; AVX2-NEXT: vmovdqa %ymm4, 64(%rsi) +; AVX2-NEXT: vmovdqa %ymm15, 96(%rsi) +; AVX2-NEXT: vmovdqa %ymm14, (%rsi) +; AVX2-NEXT: vmovdqa %ymm13, 32(%rsi) +; AVX2-NEXT: vmovdqa %ymm5, 96(%rdx) +; AVX2-NEXT: vmovdqa %ymm8, (%rdx) +; AVX2-NEXT: vmovdqa %ymm6, 32(%rdx) +; AVX2-NEXT: vmovdqa %ymm3, 64(%rdx) +; AVX2-NEXT: vmovaps %ymm2, 64(%rcx) +; AVX2-NEXT: vmovaps %ymm1, 96(%rcx) +; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vmovaps %ymm7, 32(%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i64_stride3_vf16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu64 320(%rdi), %zmm0 +; AVX512-NEXT: vmovdqu64 256(%rdi), %zmm1 +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm2 +; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm3 +; AVX512-NEXT: vmovdqu64 128(%rdi), %zmm4 +; AVX512-NEXT: vmovdqu64 192(%rdi), %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,3,6,9,12,15,u,u> +; AVX512-NEXT: vmovdqa64 %zmm5, %zmm7 +; AVX512-NEXT: vpermt2q %zmm1, %zmm6, %zmm7 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,3,4,5,10,13] +; AVX512-NEXT: vpermt2q %zmm0, %zmm8, %zmm7 +; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm6 +; AVX512-NEXT: vpermt2q %zmm4, %zmm8, %zmm6 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = <1,4,7,10,13,u,u,u> +; AVX512-NEXT: vmovdqa64 %zmm5, %zmm9 +; AVX512-NEXT: vpermt2q %zmm1, %zmm8, %zmm9 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = [0,1,2,3,4,8,11,14] +; AVX512-NEXT: vpermt2q %zmm0, %zmm10, %zmm9 +; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm8 +; AVX512-NEXT: vpermt2q %zmm4, %zmm10, %zmm8 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm10 = <10,13,0,3,6,u,u,u> +; AVX512-NEXT: vpermt2q %zmm5, %zmm10, %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,4,9,12,15] +; AVX512-NEXT: vpermt2q %zmm0, %zmm5, %zmm1 +; AVX512-NEXT: vpermt2q %zmm2, %zmm10, %zmm3 +; AVX512-NEXT: vpermt2q %zmm4, %zmm5, %zmm3 +; AVX512-NEXT: vmovdqu64 %zmm7, 64(%rsi) +; AVX512-NEXT: vmovdqu64 %zmm6, (%rsi) +; AVX512-NEXT: vmovdqu64 %zmm9, 64(%rdx) +; AVX512-NEXT: vmovdqu64 %zmm8, (%rdx) +; AVX512-NEXT: vmovdqu64 %zmm1, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm3, (%rcx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %wide.vec = load <48 x i64>, <48 x i64>* %in.vec, align 32 + + %strided.vec0 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> + %strided.vec1 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> + %strided.vec2 = shufflevector <48 x i64> %wide.vec, <48 x i64> poison, <16 x i32> + + store <16 x i64> %strided.vec0, <16 x i64>* %out.vec0, align 32 + store <16 x i64> %strided.vec1, <16 x i64>* %out.vec1, align 32 + store <16 x i64> %strided.vec2, <16 x i64>* %out.vec2, align 32 + + ret void +} diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll new file mode 100644 index 0000000..6de0d93 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-3.ll @@ -0,0 +1,654 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: 
llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX1 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512 + +; These patterns are produced by LoopVectorizer for interleaved stores. + +define void @store_i64_stride3_vf2(<2 x i64>* %in.vecptr0, <2 x i64>* %in.vecptr1, <2 x i64>* %in.vecptr2, <6 x i64>* %out.vec) nounwind { +; SSE-LABEL: store_i64_stride3_vf2: +; SSE: # %bb.0: +; SSE-NEXT: movapd (%rdi), %xmm0 +; SSE-NEXT: movapd (%rsi), %xmm1 +; SSE-NEXT: movapd (%rdx), %xmm2 +; SSE-NEXT: movapd %xmm0, %xmm3 +; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1] +; SSE-NEXT: movapd %xmm0, 16(%rcx) +; SSE-NEXT: movapd %xmm1, 32(%rcx) +; SSE-NEXT: movapd %xmm3, (%rcx) +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i64_stride3_vf2: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovaps (%rdi), %xmm0 +; AVX1-NEXT: vmovaps (%rsi), %xmm1 +; AVX1-NEXT: vmovaps (%rdx), %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm3 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 +; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[3] +; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; AVX1-NEXT: vmovaps %xmm1, 32(%rcx) +; AVX1-NEXT: vmovapd %ymm0, (%rcx) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: store_i64_stride3_vf2: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rdi), %xmm0 +; AVX2-NEXT: vmovaps (%rsi), %xmm1 +; AVX2-NEXT: vmovaps (%rdx), %xmm2 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm3 +; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5],ymm0[6,7] +; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; AVX2-NEXT: vmovaps %xmm1, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: store_i64_stride3_vf2: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovaps (%rdi), %xmm0 +; AVX512-NEXT: vmovaps (%rdx), %xmm1 +; AVX512-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0 +; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vmovaps {{.*#+}} zmm1 = <0,2,4,1,3,5,u,u> +; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: vextractf32x4 $2, %zmm0, 32(%rcx) +; AVX512-NEXT: vmovaps %ymm0, (%rcx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %in.vec0 = load <2 x i64>, <2 x i64>* %in.vecptr0, align 32 + %in.vec1 = load <2 x i64>, <2 x i64>* %in.vecptr1, align 32 + %in.vec2 = load <2 x i64>, <2 x i64>* %in.vecptr2, align 32 + + %concat01 = shufflevector <2 x i64> %in.vec0, <2 x i64> %in.vec1, <4 x i32> + %concat2u = shufflevector <2 x i64> %in.vec2, <2 x i64> poison, <4 x i32> + %concat012 = shufflevector <4 x i64> %concat01, <4 x i64> %concat2u, <6 x i32> + %interleaved.vec = shufflevector <6 x i64> %concat012, <6 x i64> poison, <6 x i32> + + store <6 x i64> %interleaved.vec, <6 x i64>* %out.vec, align 32 + + ret void +} + +define void @store_i64_stride3_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr1, <4 x i64>* 
%in.vecptr2, <12 x i64>* %out.vec) nounwind { +; SSE-LABEL: store_i64_stride3_vf4: +; SSE: # %bb.0: +; SSE-NEXT: movaps (%rdi), %xmm0 +; SSE-NEXT: movaps 16(%rdi), %xmm1 +; SSE-NEXT: movaps (%rsi), %xmm2 +; SSE-NEXT: movaps 16(%rsi), %xmm3 +; SSE-NEXT: movaps (%rdx), %xmm4 +; SSE-NEXT: movaps 16(%rdx), %xmm5 +; SSE-NEXT: movaps %xmm3, %xmm6 +; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm5[1] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm1[2,3] +; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; SSE-NEXT: movaps %xmm2, %xmm3 +; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1] +; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm0[2,3] +; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; SSE-NEXT: movaps %xmm0, (%rcx) +; SSE-NEXT: movaps %xmm4, 16(%rcx) +; SSE-NEXT: movaps %xmm3, 32(%rcx) +; SSE-NEXT: movaps %xmm1, 48(%rcx) +; SSE-NEXT: movaps %xmm5, 64(%rcx) +; SSE-NEXT: movaps %xmm6, 80(%rcx) +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i64_stride3_vf4: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovapd (%rdi), %ymm0 +; AVX1-NEXT: vmovapd (%rdx), %ymm1 +; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm2 +; AVX1-NEXT: vmovaps (%rdi), %xmm3 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm3[0],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7] +; AVX1-NEXT: vmovapd 16(%rdx), %xmm3 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3] +; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm4 +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3] +; AVX1-NEXT: vmovapd %ymm0, 32(%rcx) +; AVX1-NEXT: vmovapd %ymm3, 64(%rcx) +; AVX1-NEXT: vmovaps %ymm2, (%rcx) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: store_i64_stride3_vf4: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rdi), %ymm0 +; AVX2-NEXT: vmovaps (%rdx), %ymm1 +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3] +; AVX2-NEXT: vmovaps 16(%rdx), %xmm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7] +; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7] +; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm3 +; AVX2-NEXT: vmovddup {{.*#+}} xmm4 = mem[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,1,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7] +; AVX2-NEXT: vpermilps {{.*#+}} ymm4 = mem[2,3,0,1,6,7,4,5] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7] +; AVX2-NEXT: vmovaps %ymm0, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm3, (%rcx) +; AVX2-NEXT: vmovaps %ymm2, 64(%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: store_i64_stride3_vf4: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512-NEXT: vmovdqa (%rdx), %ymm1 +; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [2,11,15,3] +; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,8,1,5,9,2,6] +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqu64 %zmm3, (%rcx) +; AVX512-NEXT: vmovdqa %ymm2, 64(%rcx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %in.vec0 
= load <4 x i64>, <4 x i64>* %in.vecptr0, align 32 + %in.vec1 = load <4 x i64>, <4 x i64>* %in.vecptr1, align 32 + %in.vec2 = load <4 x i64>, <4 x i64>* %in.vecptr2, align 32 + + %concat01 = shufflevector <4 x i64> %in.vec0, <4 x i64> %in.vec1, <8 x i32> + %concat2u = shufflevector <4 x i64> %in.vec2, <4 x i64> poison, <8 x i32> + %concat012 = shufflevector <8 x i64> %concat01, <8 x i64> %concat2u, <12 x i32> + %interleaved.vec = shufflevector <12 x i64> %concat012, <12 x i64> poison, <12 x i32> + + store <12 x i64> %interleaved.vec, <12 x i64>* %out.vec, align 32 + + ret void +} + +define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr1, <8 x i64>* %in.vecptr2, <24 x i64>* %out.vec) nounwind { +; SSE-LABEL: store_i64_stride3_vf8: +; SSE: # %bb.0: +; SSE-NEXT: movaps (%rdi), %xmm3 +; SSE-NEXT: movaps 16(%rdi), %xmm2 +; SSE-NEXT: movaps 32(%rdi), %xmm13 +; SSE-NEXT: movaps 48(%rdi), %xmm12 +; SSE-NEXT: movaps (%rsi), %xmm8 +; SSE-NEXT: movaps 16(%rsi), %xmm9 +; SSE-NEXT: movaps 32(%rsi), %xmm11 +; SSE-NEXT: movaps 48(%rsi), %xmm4 +; SSE-NEXT: movaps (%rdx), %xmm7 +; SSE-NEXT: movaps 16(%rdx), %xmm0 +; SSE-NEXT: movaps 32(%rdx), %xmm6 +; SSE-NEXT: movaps 48(%rdx), %xmm5 +; SSE-NEXT: movaps %xmm4, %xmm10 +; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm5[1] +; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm12[2,3] +; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm4[0] +; SSE-NEXT: movaps %xmm11, %xmm14 +; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm6[1] +; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm13[2,3] +; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm11[0] +; SSE-NEXT: movaps %xmm9, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3] +; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm9[0] +; SSE-NEXT: movaps %xmm8, %xmm4 +; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm7[1] +; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm3[2,3] +; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm8[0] +; SSE-NEXT: movaps %xmm3, (%rcx) +; SSE-NEXT: movaps %xmm7, 16(%rcx) +; SSE-NEXT: movaps %xmm4, 32(%rcx) +; SSE-NEXT: movaps %xmm2, 48(%rcx) +; SSE-NEXT: movaps %xmm0, 64(%rcx) +; SSE-NEXT: movaps %xmm1, 80(%rcx) +; SSE-NEXT: movaps %xmm13, 96(%rcx) +; SSE-NEXT: movaps %xmm6, 112(%rcx) +; SSE-NEXT: movaps %xmm14, 128(%rcx) +; SSE-NEXT: movaps %xmm12, 144(%rcx) +; SSE-NEXT: movaps %xmm5, 160(%rcx) +; SSE-NEXT: movaps %xmm10, 176(%rcx) +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i64_stride3_vf8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovapd 32(%rdi), %ymm0 +; AVX1-NEXT: vmovapd (%rdi), %ymm1 +; AVX1-NEXT: vmovapd 32(%rdx), %ymm2 +; AVX1-NEXT: vmovapd (%rdx), %ymm3 +; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm4 +; AVX1-NEXT: vmovaps (%rdi), %xmm5 +; AVX1-NEXT: vmovaps 32(%rdi), %xmm6 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm7 = xmm6[0],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5],ymm6[6,7] +; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm6 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm7 = xmm5[0],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5 +; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7] +; AVX1-NEXT: vmovapd 16(%rdx), %xmm6 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm1[2,3],ymm3[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3] +; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm7 +; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3] +; AVX1-NEXT: vmovapd 48(%rdx), %xmm7 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm8 = 
ymm0[2,3],ymm2[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3] +; AVX1-NEXT: vbroadcastsd 56(%rsi), %ymm8 +; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2] +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm8[0],ymm2[1],ymm8[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2] +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3] +; AVX1-NEXT: vmovapd %ymm1, 32(%rcx) +; AVX1-NEXT: vmovapd %ymm0, 128(%rcx) +; AVX1-NEXT: vmovapd %ymm7, 160(%rcx) +; AVX1-NEXT: vmovapd %ymm6, 64(%rcx) +; AVX1-NEXT: vmovaps %ymm5, (%rcx) +; AVX1-NEXT: vmovaps %ymm4, 96(%rcx) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: store_i64_stride3_vf8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rdi), %ymm0 +; AVX2-NEXT: vmovaps 32(%rdi), %ymm1 +; AVX2-NEXT: vmovaps 32(%rdx), %ymm2 +; AVX2-NEXT: vmovaps (%rdx), %ymm3 +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm3[2,3] +; AVX2-NEXT: vmovaps 16(%rdx), %xmm5 +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] +; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm5 +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7] +; AVX2-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm5 +; AVX2-NEXT: vmovddup {{.*#+}} xmm6 = mem[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm1[0,1,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm1[2,3],ymm2[2,3] +; AVX2-NEXT: vmovaps 48(%rdx), %xmm7 +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7] +; AVX2-NEXT: vbroadcastsd 56(%rsi), %ymm7 +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7] +; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm7 +; AVX2-NEXT: vmovddup {{.*#+}} xmm8 = mem[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7] +; AVX2-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3],ymm8[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7] +; AVX2-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,0,1,6,7,4,5] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] +; AVX2-NEXT: vmovaps %ymm0, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm1, 128(%rcx) +; AVX2-NEXT: vmovaps %ymm7, (%rcx) +; AVX2-NEXT: vmovaps %ymm6, 160(%rcx) +; AVX2-NEXT: vmovaps %ymm5, 96(%rcx) +; AVX2-NEXT: vmovaps %ymm4, 64(%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: store_i64_stride3_vf8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm1 +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,8,u,1,9,u,2,10> +; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,8,3,4,9,6,7] +; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <2,11,u,3,12,u,4,13> +; AVX512-NEXT: vpermi2q %zmm0, %zmm2, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,11,3,4,12,6,7] +; AVX512-NEXT: vpermi2q %zmm1, %zmm3, %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = 
<5,13,u,6,14,u,7,15> +; AVX512-NEXT: vpermi2q %zmm2, %zmm1, %zmm3 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,14,3,4,15,6,7] +; AVX512-NEXT: vpermi2q %zmm0, %zmm3, %zmm1 +; AVX512-NEXT: vmovdqu64 %zmm1, 128(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm4, (%rcx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %in.vec0 = load <8 x i64>, <8 x i64>* %in.vecptr0, align 32 + %in.vec1 = load <8 x i64>, <8 x i64>* %in.vecptr1, align 32 + %in.vec2 = load <8 x i64>, <8 x i64>* %in.vecptr2, align 32 + + %concat01 = shufflevector <8 x i64> %in.vec0, <8 x i64> %in.vec1, <16 x i32> + %concat2u = shufflevector <8 x i64> %in.vec2, <8 x i64> poison, <16 x i32> + %concat012 = shufflevector <16 x i64> %concat01, <16 x i64> %concat2u, <24 x i32> + %interleaved.vec = shufflevector <24 x i64> %concat012, <24 x i64> poison, <24 x i32> + + store <24 x i64> %interleaved.vec, <24 x i64>* %out.vec, align 32 + + ret void +} + +define void @store_i64_stride3_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vecptr1, <16 x i64>* %in.vecptr2, <48 x i64>* %out.vec) nounwind { +; SSE-LABEL: store_i64_stride3_vf16: +; SSE: # %bb.0: +; SSE-NEXT: subq $24, %rsp +; SSE-NEXT: movapd 64(%rdi), %xmm9 +; SSE-NEXT: movapd (%rdi), %xmm3 +; SSE-NEXT: movapd 16(%rdi), %xmm13 +; SSE-NEXT: movapd 32(%rdi), %xmm8 +; SSE-NEXT: movapd 48(%rdi), %xmm10 +; SSE-NEXT: movapd 64(%rsi), %xmm12 +; SSE-NEXT: movapd (%rsi), %xmm7 +; SSE-NEXT: movapd 16(%rsi), %xmm14 +; SSE-NEXT: movapd 32(%rsi), %xmm15 +; SSE-NEXT: movapd 48(%rsi), %xmm11 +; SSE-NEXT: movapd 64(%rdx), %xmm6 +; SSE-NEXT: movapd (%rdx), %xmm2 +; SSE-NEXT: movapd 16(%rdx), %xmm4 +; SSE-NEXT: movapd 32(%rdx), %xmm5 +; SSE-NEXT: movapd 48(%rdx), %xmm0 +; SSE-NEXT: movapd %xmm3, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm7[0] +; SSE-NEXT: movapd %xmm1, (%rsp) # 16-byte Spill +; SSE-NEXT: movsd {{.*#+}} xmm3 = xmm2[0],xmm3[1] +; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm2[1] +; SSE-NEXT: movapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm13, %xmm3 +; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm14[0] +; SSE-NEXT: movapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movsd {{.*#+}} xmm13 = xmm4[0],xmm13[1] +; SSE-NEXT: movapd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm4[1] +; SSE-NEXT: movapd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movapd %xmm8, %xmm13 +; SSE-NEXT: unpcklpd {{.*#+}} xmm13 = xmm13[0],xmm15[0] +; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm5[0],xmm8[1] +; SSE-NEXT: movapd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: unpckhpd {{.*#+}} xmm15 = xmm15[1],xmm5[1] +; SSE-NEXT: movapd %xmm10, %xmm1 +; SSE-NEXT: unpcklpd {{.*#+}} xmm10 = xmm10[0],xmm11[0] +; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] +; SSE-NEXT: movapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm0[1] +; SSE-NEXT: movapd %xmm9, %xmm14 +; SSE-NEXT: unpcklpd {{.*#+}} xmm14 = xmm14[0],xmm12[0] +; SSE-NEXT: movsd {{.*#+}} xmm9 = xmm6[0],xmm9[1] +; SSE-NEXT: movapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm6[1] +; SSE-NEXT: movapd 80(%rdi), %xmm8 +; SSE-NEXT: movapd 80(%rsi), %xmm6 +; SSE-NEXT: movapd %xmm8, %xmm9 +; SSE-NEXT: unpcklpd {{.*#+}} xmm9 = xmm9[0],xmm6[0] +; SSE-NEXT: movapd 80(%rdx), %xmm0 +; SSE-NEXT: movsd {{.*#+}} xmm8 = xmm0[0],xmm8[1] +; 
SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm0[1] +; SSE-NEXT: movapd 96(%rdi), %xmm5 +; SSE-NEXT: movapd 96(%rsi), %xmm1 +; SSE-NEXT: movapd %xmm5, %xmm7 +; SSE-NEXT: unpcklpd {{.*#+}} xmm7 = xmm7[0],xmm1[0] +; SSE-NEXT: movapd 96(%rdx), %xmm2 +; SSE-NEXT: movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1] +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1] +; SSE-NEXT: movapd 112(%rdi), %xmm2 +; SSE-NEXT: movapd 112(%rsi), %xmm0 +; SSE-NEXT: movapd %xmm2, %xmm3 +; SSE-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0],xmm0[0] +; SSE-NEXT: movapd 112(%rdx), %xmm4 +; SSE-NEXT: movsd {{.*#+}} xmm2 = xmm4[0],xmm2[1] +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; SSE-NEXT: movapd %xmm0, 368(%rcx) +; SSE-NEXT: movapd %xmm2, 352(%rcx) +; SSE-NEXT: movapd %xmm3, 336(%rcx) +; SSE-NEXT: movapd %xmm1, 320(%rcx) +; SSE-NEXT: movapd %xmm5, 304(%rcx) +; SSE-NEXT: movapd %xmm7, 288(%rcx) +; SSE-NEXT: movapd %xmm6, 272(%rcx) +; SSE-NEXT: movapd %xmm8, 256(%rcx) +; SSE-NEXT: movapd %xmm9, 240(%rcx) +; SSE-NEXT: movapd %xmm12, 224(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 208(%rcx) +; SSE-NEXT: movapd %xmm14, 192(%rcx) +; SSE-NEXT: movapd %xmm11, 176(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 160(%rcx) +; SSE-NEXT: movapd %xmm10, 144(%rcx) +; SSE-NEXT: movapd %xmm15, 128(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 112(%rcx) +; SSE-NEXT: movapd %xmm13, 96(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 80(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 64(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 48(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 32(%rcx) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 16(%rcx) +; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, (%rcx) +; SSE-NEXT: addq $24, %rsp +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i64_stride3_vf16: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovapd (%rdi), %ymm13 +; AVX1-NEXT: vmovapd 96(%rdi), %ymm14 +; AVX1-NEXT: vmovapd 32(%rdi), %ymm4 +; AVX1-NEXT: vmovapd 64(%rdi), %ymm7 +; AVX1-NEXT: vmovapd (%rdx), %ymm3 +; AVX1-NEXT: vmovapd 96(%rdx), %ymm5 +; AVX1-NEXT: vmovapd 32(%rdx), %ymm8 +; AVX1-NEXT: vmovapd 64(%rdx), %ymm10 +; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm1 +; AVX1-NEXT: vmovaps (%rdi), %xmm6 +; AVX1-NEXT: vmovaps 32(%rdi), %xmm0 +; AVX1-NEXT: vmovaps 64(%rdi), %xmm2 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm6[0],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm9, %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5],ymm6[6,7] +; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vinsertf128 $1, 64(%rdx), %ymm0, %ymm6 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm2[0],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm9, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm6[4,5],ymm2[6,7] +; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm2 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm0[0],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm9, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7] +; AVX1-NEXT: vinsertf128 $1, 96(%rdx), %ymm0, %ymm0 +; AVX1-NEXT: vmovaps 96(%rdi), %xmm2 +; AVX1-NEXT: vunpcklpd {{.*#+}} xmm11 
= xmm2[0],mem[0] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm11, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] +; AVX1-NEXT: vmovapd 80(%rdx), %xmm0 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm7[2,3],ymm10[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3] +; AVX1-NEXT: vbroadcastsd 88(%rsi), %ymm2 +; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm2[2],ymm0[3] +; AVX1-NEXT: vmovapd 48(%rdx), %xmm0 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm4[2,3],ymm8[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3] +; AVX1-NEXT: vbroadcastsd 56(%rsi), %ymm2 +; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3] +; AVX1-NEXT: vmovapd 112(%rdx), %xmm2 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm14[2,3],ymm5[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm15[1,2,3] +; AVX1-NEXT: vbroadcastsd 120(%rsi), %ymm15 +; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm15[2],ymm2[3] +; AVX1-NEXT: vmovapd 16(%rdx), %xmm15 +; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm13[2,3],ymm3[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3] +; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm15 +; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm15 = mem[1,0,2,2] +; AVX1-NEXT: vblendpd {{.*#+}} ymm10 = ymm15[0],ymm10[1],ymm15[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2],ymm10[3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = mem[1,0,2,2] +; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2] +; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm14[2],ymm5[3] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2] +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm8[0],ymm3[1],ymm8[2,3] +; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3] +; AVX1-NEXT: vmovapd %ymm3, 32(%rcx) +; AVX1-NEXT: vmovapd %ymm5, 320(%rcx) +; AVX1-NEXT: vmovapd %ymm4, 128(%rcx) +; AVX1-NEXT: vmovapd %ymm7, 224(%rcx) +; AVX1-NEXT: vmovapd %ymm1, 64(%rcx) +; AVX1-NEXT: vmovapd %ymm2, 352(%rcx) +; AVX1-NEXT: vmovapd %ymm0, 160(%rcx) +; AVX1-NEXT: vmovapd %ymm12, 256(%rcx) +; AVX1-NEXT: vmovaps %ymm11, 288(%rcx) +; AVX1-NEXT: vmovaps %ymm9, 96(%rcx) +; AVX1-NEXT: vmovaps %ymm6, 192(%rcx) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm0, (%rcx) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: store_i64_stride3_vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rdi), %ymm0 +; AVX2-NEXT: vmovaps 32(%rdi), %ymm4 +; AVX2-NEXT: vmovaps 64(%rdi), %ymm7 +; AVX2-NEXT: vmovaps 96(%rdi), %ymm3 +; AVX2-NEXT: vmovaps (%rdx), %ymm2 +; AVX2-NEXT: vmovaps 96(%rdx), %ymm6 +; AVX2-NEXT: vmovaps 32(%rdx), %ymm8 +; AVX2-NEXT: vmovaps 64(%rdx), %ymm10 +; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm1 +; AVX2-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0,1],ymm5[2,3],ymm9[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5],ymm5[6,7] +; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX2-NEXT: vinsertf128 $1, 64(%rdx), %ymm0, %ymm5 +; AVX2-NEXT: vmovddup {{.*#+}} xmm9 = mem[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm7[0,1,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm5 = 
ymm9[0,1,2,3],ymm5[4,5],ymm9[6,7] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm7[2,3],ymm10[2,3] +; AVX2-NEXT: vmovaps 80(%rdx), %xmm11 +; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5],ymm9[6,7] +; AVX2-NEXT: vbroadcastsd 88(%rsi), %ymm11 +; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5],ymm9[6,7] +; AVX2-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm11 +; AVX2-NEXT: vmovddup {{.*#+}} xmm12 = mem[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm4[0,1,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm4[2,3],ymm8[2,3] +; AVX2-NEXT: vmovaps 48(%rdx), %xmm13 +; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5],ymm12[6,7] +; AVX2-NEXT: vbroadcastsd 56(%rsi), %ymm13 +; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7] +; AVX2-NEXT: vinsertf128 $1, 96(%rdx), %ymm0, %ymm13 +; AVX2-NEXT: vmovddup {{.*#+}} xmm14 = mem[0,0] +; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm3[0,1,2,1] +; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5],ymm14[6,7] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm3[2,3],ymm6[2,3] +; AVX2-NEXT: vmovaps 112(%rdx), %xmm15 +; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5],ymm14[6,7] +; AVX2-NEXT: vbroadcastsd 120(%rsi), %ymm15 +; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7] +; AVX2-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm0[2,3],ymm2[2,3] +; AVX2-NEXT: vmovaps 16(%rdx), %xmm1 +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2,3],ymm1[4,5],ymm15[6,7] +; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm15 +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5],ymm1[6,7] +; AVX2-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,0,1,6,7,4,5] +; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3],ymm15[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5],ymm10[6,7] +; AVX2-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,0,1,6,7,4,5] +; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3],ymm10[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5],ymm8[6,7] +; AVX2-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5] +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5],ymm6[6,7] +; AVX2-NEXT: vpermilps {{.*#+}} ymm6 = mem[2,3,0,1,6,7,4,5] +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5,6,7] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7] +; AVX2-NEXT: vmovaps %ymm0, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm3, 320(%rcx) +; AVX2-NEXT: vmovaps %ymm4, 128(%rcx) +; AVX2-NEXT: vmovaps %ymm7, 224(%rcx) +; AVX2-NEXT: vmovaps %ymm1, 64(%rcx) +; AVX2-NEXT: vmovaps %ymm14, 352(%rcx) +; AVX2-NEXT: vmovaps %ymm13, 288(%rcx) +; AVX2-NEXT: vmovaps %ymm12, 160(%rcx) +; AVX2-NEXT: vmovaps %ymm11, 96(%rcx) +; AVX2-NEXT: vmovaps %ymm9, 256(%rcx) +; AVX2-NEXT: vmovaps %ymm5, 192(%rcx) +; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: store_i64_stride3_vf16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 +; AVX512-NEXT: vmovdqu64 (%rsi), %zmm2 +; AVX512-NEXT: vmovdqu64 64(%rsi), %zmm3 +; AVX512-NEXT: vmovdqu64 (%rdx), %zmm4 
+; AVX512-NEXT: vmovdqu64 64(%rdx), %zmm5 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <0,8,u,1,9,u,2,10> +; AVX512-NEXT: vmovdqa64 %zmm0, %zmm7 +; AVX512-NEXT: vpermt2q %zmm2, %zmm6, %zmm7 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,8,3,4,9,6,7] +; AVX512-NEXT: vpermt2q %zmm4, %zmm8, %zmm7 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = <5,13,u,6,14,u,7,15> +; AVX512-NEXT: vmovdqa64 %zmm3, %zmm10 +; AVX512-NEXT: vpermt2q %zmm5, %zmm9, %zmm10 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,14,3,4,15,6,7] +; AVX512-NEXT: vpermt2q %zmm1, %zmm11, %zmm10 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <2,11,u,3,12,u,4,13> +; AVX512-NEXT: vmovdqa64 %zmm5, %zmm13 +; AVX512-NEXT: vpermt2q %zmm1, %zmm12, %zmm13 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,11,3,4,12,6,7] +; AVX512-NEXT: vpermt2q %zmm3, %zmm14, %zmm13 +; AVX512-NEXT: vpermt2q %zmm3, %zmm6, %zmm1 +; AVX512-NEXT: vpermt2q %zmm5, %zmm8, %zmm1 +; AVX512-NEXT: vpermi2q %zmm4, %zmm2, %zmm9 +; AVX512-NEXT: vpermt2q %zmm0, %zmm11, %zmm9 +; AVX512-NEXT: vpermt2q %zmm0, %zmm12, %zmm4 +; AVX512-NEXT: vpermt2q %zmm2, %zmm14, %zmm4 +; AVX512-NEXT: vmovdqu64 %zmm4, 64(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm9, 128(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm1, 192(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm13, 256(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm10, 320(%rcx) +; AVX512-NEXT: vmovdqu64 %zmm7, (%rcx) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %in.vec0 = load <16 x i64>, <16 x i64>* %in.vecptr0, align 32 + %in.vec1 = load <16 x i64>, <16 x i64>* %in.vecptr1, align 32 + %in.vec2 = load <16 x i64>, <16 x i64>* %in.vecptr2, align 32 + + %concat01 = shufflevector <16 x i64> %in.vec0, <16 x i64> %in.vec1, <32 x i32> + %concat2u = shufflevector <16 x i64> %in.vec2, <16 x i64> poison, <32 x i32> + %concat012 = shufflevector <32 x i64> %concat01, <32 x i64> %concat2u, <48 x i32> + %interleaved.vec = shufflevector <48 x i64> %concat012, <48 x i64> poison, <48 x i32> + + store <48 x i64> %interleaved.vec, <48 x i64>* %out.vec, align 32 + + ret void +} -- 2.7.4
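
For reference, a minimal sketch (in LLVM IR, in the same style as the load test file) of the stride-3 deinterleaving pattern that load_i64_stride3_vf2 exercises. The function name is illustrative and the shufflevector masks are reconstructed from the stride-3 access pattern, not copied from the patch, since the mask constants did not survive in the pasted diff:

; Illustrative stride-3 deinterleave, vf2: split a wide <6 x i64> laid out as
; {a0,b0,c0,a1,b1,c1} into the three strided results a={a0,a1}, b={b0,b1}, c={c0,c1}.
define void @sketch_load_i64_stride3_vf2(<6 x i64>* %in, <2 x i64>* %out0, <2 x i64>* %out1, <2 x i64>* %out2) {
  %wide = load <6 x i64>, <6 x i64>* %in, align 32
  ; each result selects every third element, starting at offsets 0, 1 and 2
  %v0 = shufflevector <6 x i64> %wide, <6 x i64> poison, <2 x i32> <i32 0, i32 3>
  %v1 = shufflevector <6 x i64> %wide, <6 x i64> poison, <2 x i32> <i32 1, i32 4>
  %v2 = shufflevector <6 x i64> %wide, <6 x i64> poison, <2 x i32> <i32 2, i32 5>
  store <2 x i64> %v0, <2 x i64>* %out0, align 32
  store <2 x i64> %v1, <2 x i64>* %out1, align 32
  store <2 x i64> %v2, <2 x i64>* %out2, align 32
  ret void
}

Each strided result simply picks every third lane of the wide load, which is the pattern the SSE/AVX check lines above lower with blends, palignr and permutes, and which AVX-512 handles with a single vpermi2q per result.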
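
Likewise, a hedged sketch of the stride-3 interleaving pattern that store_i64_stride3_vf2 exercises; again the function name and mask constants are reconstructed from the stride-3 semantics rather than taken verbatim from the test file:

; Illustrative stride-3 interleave, vf2: given a={a0,a1}, b={b0,b1}, c={c0,c1},
; build and store {a0,b0,c0,a1,b1,c1} as one <6 x i64>.
define void @sketch_store_i64_stride3_vf2(<2 x i64>* %inA, <2 x i64>* %inB, <2 x i64>* %inC, <6 x i64>* %out) {
  %a = load <2 x i64>, <2 x i64>* %inA, align 32
  %b = load <2 x i64>, <2 x i64>* %inB, align 32
  %c = load <2 x i64>, <2 x i64>* %inC, align 32
  ; widen the inputs to a common length: a||b, then c padded with undef lanes
  %ab = shufflevector <2 x i64> %a, <2 x i64> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %cu = shufflevector <2 x i64> %c, <2 x i64> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  %abc = shufflevector <4 x i64> %ab, <4 x i64> %cu, <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
  ; final interleave: pick a0,b0,c0,a1,b1,c1 out of the concatenated {a0,a1,b0,b1,c0,c1}
  %ilv = shufflevector <6 x i64> %abc, <6 x i64> poison, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
  store <6 x i64> %ilv, <6 x i64>* %out, align 32
  ret void
}

The concat shuffles exist only so the final shuffle can express the interleave as a single mask over one value, which mirrors the %concat01 / %concat2u / %concat012 / %interleaved.vec structure used throughout the store tests above.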