From 6bba2bd7689656193e9d4568c2ebd985bc4d82ce Mon Sep 17 00:00:00 2001 From: Roman Lebedev Date: Mon, 4 Oct 2021 16:50:43 +0300 Subject: [PATCH] [NFC][X86][Codegen] Add test coverage for interleaved i32 load/store stride=4 --- .../X86/vector-interleaved-load-i32-stride-4.ll | 798 +++++++++++++++++++++ .../X86/vector-interleaved-store-i32-stride-4.ll | 692 ++++++++++++++++++ 2 files changed, 1490 insertions(+) create mode 100644 llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll create mode 100644 llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll new file mode 100644 index 0000000..ab4fb54 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-4.ll @@ -0,0 +1,798 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX1 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512 + +; These patterns are produced by LoopVectorizer for interleaved loads. + +define void @load_i32_stride4_vf2(<8 x i32>* %in.vec, <2 x i32>* %out.vec0, <2 x i32>* %out.vec1, <2 x i32>* %out.vec2, <2 x i32>* %out.vec3) nounwind { +; SSE-LABEL: load_i32_stride4_vf2: +; SSE: # %bb.0: +; SSE-NEXT: movdqa (%rdi), %xmm0 +; SSE-NEXT: movdqa 16(%rdi), %xmm1 +; SSE-NEXT: movdqa %xmm0, %xmm2 +; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3] +; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] +; SSE-NEXT: movq %xmm2, (%rsi) +; SSE-NEXT: movq %xmm3, (%rdx) +; SSE-NEXT: movq %xmm0, (%rcx) +; SSE-NEXT: movq %xmm1, (%r8) +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i32_stride4_vf2: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovdqa (%rdi), %xmm0 +; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX1-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1] +; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3],xmm3[4,5,6,7] +; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX1-NEXT: vmovq %xmm2, (%rsi) +; AVX1-NEXT: vmovq %xmm3, (%rdx) +; AVX1-NEXT: vmovq %xmm0, (%rcx) +; AVX1-NEXT: vpextrq $1, %xmm0, (%r8) +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i32_stride4_vf2: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovdqa (%rdi), %xmm0 +; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1] +; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3] +; AVX2-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX2-NEXT: vmovq %xmm2, (%rsi) +; AVX2-NEXT: vmovq %xmm3, (%rdx) +; AVX2-NEXT: vmovq %xmm0, (%rcx) +; AVX2-NEXT: vpextrq $1, %xmm0, (%r8) +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i32_stride4_vf2: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa (%rdi), %xmm0 +; 
AVX512-NEXT: vmovdqa 16(%rdi), %xmm1 +; AVX512-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX512-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,1,1] +; AVX512-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm1[1],xmm3[2,3] +; AVX512-NEXT: vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX512-NEXT: vmovq %xmm2, (%rsi) +; AVX512-NEXT: vmovq %xmm3, (%rdx) +; AVX512-NEXT: vmovq %xmm0, (%rcx) +; AVX512-NEXT: vpextrq $1, %xmm0, (%r8) +; AVX512-NEXT: retq + %wide.vec = load <8 x i32>, <8 x i32>* %in.vec, align 32 + + %strided.vec0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> + %strided.vec1 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> + %strided.vec2 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> + %strided.vec3 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison, <2 x i32> + + store <2 x i32> %strided.vec0, <2 x i32>* %out.vec0, align 32 + store <2 x i32> %strided.vec1, <2 x i32>* %out.vec1, align 32 + store <2 x i32> %strided.vec2, <2 x i32>* %out.vec2, align 32 + store <2 x i32> %strided.vec3, <2 x i32>* %out.vec3, align 32 + + ret void +} + +define void @load_i32_stride4_vf4(<16 x i32>* %in.vec, <4 x i32>* %out.vec0, <4 x i32>* %out.vec1, <4 x i32>* %out.vec2, <4 x i32>* %out.vec3) nounwind { +; SSE-LABEL: load_i32_stride4_vf4: +; SSE: # %bb.0: +; SSE-NEXT: movaps (%rdi), %xmm0 +; SSE-NEXT: movaps 16(%rdi), %xmm1 +; SSE-NEXT: movaps 32(%rdi), %xmm2 +; SSE-NEXT: movaps 48(%rdi), %xmm3 +; SSE-NEXT: movaps %xmm2, %xmm4 +; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE-NEXT: movaps %xmm0, %xmm5 +; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1] +; SSE-NEXT: movaps %xmm5, %xmm6 +; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm4[1] +; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE-NEXT: movaps %xmm0, %xmm1 +; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] +; SSE-NEXT: movaps %xmm6, (%rsi) +; SSE-NEXT: movaps %xmm5, (%rdx) +; SSE-NEXT: movaps %xmm1, (%rcx) +; SSE-NEXT: movaps %xmm0, (%r8) +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i32_stride4_vf4: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovaps 32(%rdi), %xmm0 +; AVX1-NEXT: vmovaps 48(%rdi), %xmm1 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm1[0],xmm0[0] +; AVX1-NEXT: vmovaps (%rdi), %xmm3 +; AVX1-NEXT: vmovaps 16(%rdi), %xmm4 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm5 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] +; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,0] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm3[1],xmm4[1],zero,zero +; AVX1-NEXT: vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = zero,zero,xmm0[2],xmm1[2] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm7 = xmm3[2],xmm4[2],xmm3[3],xmm4[3] +; AVX1-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm4[3,0],xmm3[3,0] +; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[2,0],xmm0[2,3] +; AVX1-NEXT: vmovaps %xmm2, (%rsi) +; AVX1-NEXT: vmovaps %xmm5, (%rdx) +; AVX1-NEXT: vmovaps %xmm6, (%rcx) +; AVX1-NEXT: vmovaps %xmm0, (%r8) +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i32_stride4_vf4: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps {{.*#+}} xmm0 = +; AVX2-NEXT: vmovaps 32(%rdi), %ymm1 +; AVX2-NEXT: 
vpermps %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vmovaps (%rdi), %xmm2 +; AVX2-NEXT: vmovaps 16(%rdi), %xmm3 +; AVX2-NEXT: vmovaps 32(%rdi), %xmm4 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm5[0,1],xmm0[2,3] +; AVX2-NEXT: vmovaps 48(%rdi), %xmm5 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] +; AVX2-NEXT: vmovaps {{.*#+}} xmm7 = <1,5,u,u> +; AVX2-NEXT: vmovaps (%rdi), %ymm8 +; AVX2-NEXT: vpermps %ymm8, %ymm7, %ymm7 +; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3] +; AVX2-NEXT: vmovaps {{.*#+}} xmm7 = +; AVX2-NEXT: vpermps %ymm1, %ymm7, %ymm1 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm4[2],xmm5[2],xmm4[3],xmm5[3] +; AVX2-NEXT: vmovaps {{.*#+}} xmm3 = <3,7,u,u> +; AVX2-NEXT: vpermps %ymm8, %ymm3, %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3] +; AVX2-NEXT: vmovaps %xmm0, (%rsi) +; AVX2-NEXT: vmovaps %xmm6, (%rdx) +; AVX2-NEXT: vmovaps %xmm1, (%rcx) +; AVX2-NEXT: vmovaps %xmm2, (%r8) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i32_stride4_vf4: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = [0,4,8,12] +; AVX512-NEXT: vmovdqa (%rdi), %ymm1 +; AVX512-NEXT: vmovdqa 32(%rdi), %ymm2 +; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm0 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,5,9,13] +; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm3 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,6,10,14] +; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm4 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [3,7,11,15] +; AVX512-NEXT: vpermi2d %ymm2, %ymm1, %ymm5 +; AVX512-NEXT: vmovdqa %xmm0, (%rsi) +; AVX512-NEXT: vmovdqa %xmm3, (%rdx) +; AVX512-NEXT: vmovdqa %xmm4, (%rcx) +; AVX512-NEXT: vmovdqa %xmm5, (%r8) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %wide.vec = load <16 x i32>, <16 x i32>* %in.vec, align 32 + + %strided.vec0 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> + %strided.vec1 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> + %strided.vec2 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> + %strided.vec3 = shufflevector <16 x i32> %wide.vec, <16 x i32> poison, <4 x i32> + + store <4 x i32> %strided.vec0, <4 x i32>* %out.vec0, align 32 + store <4 x i32> %strided.vec1, <4 x i32>* %out.vec1, align 32 + store <4 x i32> %strided.vec2, <4 x i32>* %out.vec2, align 32 + store <4 x i32> %strided.vec3, <4 x i32>* %out.vec3, align 32 + + ret void +} + +define void @load_i32_stride4_vf8(<32 x i32>* %in.vec, <8 x i32>* %out.vec0, <8 x i32>* %out.vec1, <8 x i32>* %out.vec2, <8 x i32>* %out.vec3) nounwind { +; SSE-LABEL: load_i32_stride4_vf8: +; SSE: # %bb.0: +; SSE-NEXT: movaps (%rdi), %xmm5 +; SSE-NEXT: movaps 16(%rdi), %xmm8 +; SSE-NEXT: movaps 32(%rdi), %xmm6 +; SSE-NEXT: movaps 48(%rdi), %xmm9 +; SSE-NEXT: movaps 80(%rdi), %xmm10 +; SSE-NEXT: movaps 64(%rdi), %xmm4 +; SSE-NEXT: movaps 112(%rdi), %xmm11 +; SSE-NEXT: movaps 96(%rdi), %xmm3 +; SSE-NEXT: movaps %xmm3, %xmm7 +; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1] +; SSE-NEXT: movaps %xmm4, %xmm1 +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] +; SSE-NEXT: movaps %xmm1, %xmm12 +; SSE-NEXT: movlhps {{.*#+}} xmm12 = xmm12[0],xmm7[0] +; SSE-NEXT: movaps %xmm6, %xmm2 +; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1] +; SSE-NEXT: movaps %xmm5, %xmm0 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = 
xmm0[0],xmm8[0],xmm0[1],xmm8[1] +; SSE-NEXT: movaps %xmm0, %xmm13 +; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm2[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1] +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] +; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3] +; SSE-NEXT: movaps %xmm4, %xmm2 +; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] +; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm9[2],xmm6[3],xmm9[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm8[2],xmm5[3],xmm8[3] +; SSE-NEXT: movaps %xmm5, %xmm7 +; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm6[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1] +; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm6[1] +; SSE-NEXT: movaps %xmm12, 16(%rsi) +; SSE-NEXT: movaps %xmm13, (%rsi) +; SSE-NEXT: movaps %xmm1, 16(%rdx) +; SSE-NEXT: movaps %xmm0, (%rdx) +; SSE-NEXT: movaps %xmm2, 16(%rcx) +; SSE-NEXT: movaps %xmm7, (%rcx) +; SSE-NEXT: movaps %xmm4, 16(%r8) +; SSE-NEXT: movaps %xmm5, (%r8) +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i32_stride4_vf8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovaps 80(%rdi), %xmm10 +; AVX1-NEXT: vmovaps 64(%rdi), %xmm1 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm2 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm3 +; AVX1-NEXT: vmovaps 96(%rdi), %xmm2 +; AVX1-NEXT: vmovaps 112(%rdi), %xmm4 +; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[0,0],xmm2[0,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm5 +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3,4,5],ymm5[6,7] +; AVX1-NEXT: vmovaps 32(%rdi), %xmm3 +; AVX1-NEXT: vmovaps 48(%rdi), %xmm5 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm9 = xmm5[0],xmm3[0] +; AVX1-NEXT: vmovaps (%rdi), %xmm6 +; AVX1-NEXT: vmovaps 16(%rdi), %xmm7 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] +; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm9[2,0] +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm9 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[1],xmm10[1],zero,zero +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm9[6,7] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm11 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm6[1],xmm7[1],zero,zero +; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm11[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm9[4,5,6,7] +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm2[2],xmm4[2] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm11 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm1[2],xmm10[2],xmm1[3],xmm10[3] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm11[6,7] +; AVX1-NEXT: vinsertps {{.*#+}} xmm12 = zero,zero,xmm3[2],xmm5[2] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3] +; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm12[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 +; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm10[3,0],xmm1[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = 
ymm1[0,1,2,3,4,5],ymm2[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm5[2],xmm3[3],xmm5[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm7[3,0],xmm6[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3] +; AVX1-NEXT: vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVX1-NEXT: vmovaps %ymm8, (%rsi) +; AVX1-NEXT: vmovaps %ymm9, (%rdx) +; AVX1-NEXT: vmovaps %ymm0, (%rcx) +; AVX1-NEXT: vmovaps %ymm1, (%r8) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i32_stride4_vf8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rdi), %ymm8 +; AVX2-NEXT: vmovaps 32(%rdi), %ymm9 +; AVX2-NEXT: vmovaps 64(%rdi), %ymm1 +; AVX2-NEXT: vmovaps 96(%rdi), %ymm2 +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm3 = [17179869184,17179869184,17179869184,17179869184] +; AVX2-NEXT: vpermps %ymm2, %ymm3, %ymm5 +; AVX2-NEXT: vpermps %ymm1, %ymm3, %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5],ymm5[6,7] +; AVX2-NEXT: vmovaps {{.*#+}} xmm5 = +; AVX2-NEXT: vpermps %ymm9, %ymm5, %ymm6 +; AVX2-NEXT: vmovaps (%rdi), %xmm7 +; AVX2-NEXT: vmovaps 16(%rdi), %xmm0 +; AVX2-NEXT: vmovaps 32(%rdi), %xmm5 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm4 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] +; AVX2-NEXT: vblendps {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm4[0,1,2,3],ymm3[4,5,6,7] +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [21474836481,21474836481,21474836481,21474836481] +; AVX2-NEXT: vpermps %ymm2, %ymm4, %ymm6 +; AVX2-NEXT: vpermps %ymm1, %ymm4, %ymm4 +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm6[6,7] +; AVX2-NEXT: vmovaps 48(%rdi), %xmm6 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; AVX2-NEXT: vmovaps {{.*#+}} xmm11 = <1,5,u,u> +; AVX2-NEXT: vpermps %ymm8, %ymm11, %ymm11 +; AVX2-NEXT: vblendps {{.*#+}} xmm3 = xmm11[0,1],xmm3[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7] +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [25769803778,25769803778,25769803778,25769803778] +; AVX2-NEXT: vpermps %ymm2, %ymm4, %ymm11 +; AVX2-NEXT: vpermps %ymm1, %ymm4, %ymm4 +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm11[6,7] +; AVX2-NEXT: vmovaps {{.*#+}} xmm11 = +; AVX2-NEXT: vpermps %ymm9, %ymm11, %ymm9 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm7[2],xmm0[2],xmm7[3],xmm0[3] +; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm9[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7] +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm4 = [30064771075,30064771075,30064771075,30064771075] +; AVX2-NEXT: vpermps %ymm2, %ymm4, %ymm2 +; AVX2-NEXT: vpermps %ymm1, %ymm4, %ymm1 +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm5[2],xmm6[2],xmm5[3],xmm6[3] +; AVX2-NEXT: vmovaps {{.*#+}} xmm4 = <3,7,u,u> +; AVX2-NEXT: vpermps %ymm8, %ymm4, %ymm4 +; AVX2-NEXT: vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vmovaps %ymm10, (%rsi) +; AVX2-NEXT: vmovaps %ymm3, (%rdx) +; AVX2-NEXT: vmovaps %ymm0, (%rcx) +; AVX2-NEXT: vmovaps %ymm1, (%r8) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i32_stride4_vf8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [0,4,8,12,16,20,24,28] +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm1 +; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm2 +; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm0 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [1,5,9,13,17,21,25,29] +; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm3 +; 
AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,6,10,14,18,22,26,30] +; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm4 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = [3,7,11,15,19,23,27,31] +; AVX512-NEXT: vpermi2d %zmm2, %zmm1, %zmm5 +; AVX512-NEXT: vmovdqa %ymm0, (%rsi) +; AVX512-NEXT: vmovdqa %ymm3, (%rdx) +; AVX512-NEXT: vmovdqa %ymm4, (%rcx) +; AVX512-NEXT: vmovdqa %ymm5, (%r8) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %wide.vec = load <32 x i32>, <32 x i32>* %in.vec, align 32 + + %strided.vec0 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> + %strided.vec1 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> + %strided.vec2 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> + %strided.vec3 = shufflevector <32 x i32> %wide.vec, <32 x i32> poison, <8 x i32> + + store <8 x i32> %strided.vec0, <8 x i32>* %out.vec0, align 32 + store <8 x i32> %strided.vec1, <8 x i32>* %out.vec1, align 32 + store <8 x i32> %strided.vec2, <8 x i32>* %out.vec2, align 32 + store <8 x i32> %strided.vec3, <8 x i32>* %out.vec3, align 32 + + ret void +} + +define void @load_i32_stride4_vf16(<64 x i32>* %in.vec, <16 x i32>* %out.vec0, <16 x i32>* %out.vec1, <16 x i32>* %out.vec2, <16 x i32>* %out.vec3) nounwind { +; SSE-LABEL: load_i32_stride4_vf16: +; SSE: # %bb.0: +; SSE-NEXT: subq $24, %rsp +; SSE-NEXT: movaps 208(%rdi), %xmm10 +; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 240(%rdi), %xmm5 +; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 224(%rdi), %xmm2 +; SSE-NEXT: movaps 80(%rdi), %xmm3 +; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 64(%rdi), %xmm9 +; SSE-NEXT: movaps 112(%rdi), %xmm6 +; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 96(%rdi), %xmm7 +; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 144(%rdi), %xmm1 +; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 128(%rdi), %xmm12 +; SSE-NEXT: movaps 176(%rdi), %xmm4 +; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movaps 160(%rdi), %xmm8 +; SSE-NEXT: movaps %xmm8, %xmm0 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE-NEXT: movaps %xmm12, %xmm11 +; SSE-NEXT: unpcklps {{.*#+}} xmm11 = xmm11[0],xmm1[0],xmm11[1],xmm1[1] +; SSE-NEXT: movaps %xmm11, %xmm1 +; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0] +; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill +; SSE-NEXT: movaps %xmm7, %xmm1 +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1] +; SSE-NEXT: movaps %xmm9, %xmm7 +; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1] +; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm0[1] +; SSE-NEXT: movaps %xmm7, %xmm0 +; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm1[1] +; SSE-NEXT: movaps %xmm2, %xmm0 +; SSE-NEXT: movaps %xmm2, %xmm6 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1] +; SSE-NEXT: movaps 192(%rdi), %xmm5 +; SSE-NEXT: movaps %xmm5, %xmm1 +; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1] +; SSE-NEXT: movaps %xmm1, %xmm15 +; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm0[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] +; SSE-NEXT: movaps 32(%rdi), %xmm4 +; SSE-NEXT: movaps 48(%rdi), %xmm13 +; SSE-NEXT: movaps %xmm4, %xmm3 
+; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm13[0],xmm3[1],xmm13[1] +; SSE-NEXT: movaps (%rdi), %xmm0 +; SSE-NEXT: movaps 16(%rdi), %xmm10 +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1] +; SSE-NEXT: movaps %xmm2, %xmm14 +; SSE-NEXT: movlhps {{.*#+}} xmm14 = xmm14[0],xmm3[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1] +; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload +; SSE-NEXT: # xmm8 = xmm8[2],mem[2],xmm8[3],mem[3] +; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload +; SSE-NEXT: # xmm12 = xmm12[2],mem[2],xmm12[3],mem[3] +; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload +; SSE-NEXT: # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3] +; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload +; SSE-NEXT: # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3] +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload +; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload +; SSE-NEXT: # xmm3 = xmm3[2],mem[2],xmm3[3],mem[3] +; SSE-NEXT: unpckhps {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload +; SSE-NEXT: # xmm9 = xmm9[2],mem[2],xmm9[3],mem[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm13[2],xmm4[3],xmm13[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3] +; SSE-NEXT: movaps %xmm12, %xmm10 +; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm8[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm12 = xmm12[1],xmm8[1] +; SSE-NEXT: movaps %xmm9, %xmm8 +; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm3[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm3[1] +; SSE-NEXT: movaps %xmm5, %xmm3 +; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm6[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm6[1] +; SSE-NEXT: movaps %xmm0, %xmm13 +; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm4[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1] +; SSE-NEXT: movaps %xmm15, 48(%rsi) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload +; SSE-NEXT: movaps %xmm4, 16(%rsi) +; SSE-NEXT: movaps (%rsp), %xmm4 # 16-byte Reload +; SSE-NEXT: movaps %xmm4, 32(%rsi) +; SSE-NEXT: movaps %xmm14, (%rsi) +; SSE-NEXT: movaps %xmm1, 48(%rdx) +; SSE-NEXT: movaps %xmm7, 16(%rdx) +; SSE-NEXT: movaps %xmm2, (%rdx) +; SSE-NEXT: movaps %xmm11, 32(%rdx) +; SSE-NEXT: movaps %xmm8, 16(%rcx) +; SSE-NEXT: movaps %xmm3, 48(%rcx) +; SSE-NEXT: movaps %xmm10, 32(%rcx) +; SSE-NEXT: movaps %xmm13, (%rcx) +; SSE-NEXT: movaps %xmm5, 48(%r8) +; SSE-NEXT: movaps %xmm9, 16(%r8) +; SSE-NEXT: movaps %xmm12, 32(%r8) +; SSE-NEXT: movaps %xmm0, (%r8) +; SSE-NEXT: addq $24, %rsp +; SSE-NEXT: retq +; +; AVX1-LABEL: load_i32_stride4_vf16: +; AVX1: # %bb.0: +; AVX1-NEXT: subq $216, %rsp +; AVX1-NEXT: vmovaps 160(%rdi), %xmm1 +; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps 176(%rdi), %xmm0 +; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX1-NEXT: vmovaps 144(%rdi), %xmm1 +; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps 128(%rdi), %xmm4 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] +; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,0] +; AVX1-NEXT: vmovaps 208(%rdi), %xmm2 +; AVX1-NEXT: vmovaps 192(%rdi), %xmm3 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; AVX1-NEXT: vmovaps %xmm2, %xmm8 +; AVX1-NEXT: vinsertf128 $1, %xmm1, 
%ymm0, %ymm1 +; AVX1-NEXT: vmovaps 224(%rdi), %xmm6 +; AVX1-NEXT: vmovaps 240(%rdi), %xmm5 +; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm5[0,0],xmm6[0,0] +; AVX1-NEXT: vmovaps %xmm5, %xmm10 +; AVX1-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps %xmm6, %xmm5 +; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vmovaps 80(%rdi), %xmm1 +; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps 64(%rdi), %xmm0 +; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill +; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps 96(%rdi), %xmm2 +; AVX1-NEXT: vmovaps 112(%rdi), %xmm7 +; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm7[0,0],xmm2[0,0] +; AVX1-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps %xmm2, %xmm6 +; AVX1-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; AVX1-NEXT: vmovaps 32(%rdi), %xmm11 +; AVX1-NEXT: vmovaps 48(%rdi), %xmm12 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm12[0],xmm11[0] +; AVX1-NEXT: vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps (%rdi), %xmm15 +; AVX1-NEXT: vmovaps 16(%rdi), %xmm13 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm14 = xmm15[0],xmm13[0],xmm15[1],xmm13[1] +; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm14[0,1],xmm0[2,0] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm5[0],xmm10[0],xmm5[1],xmm10[1] +; AVX1-NEXT: vmovaps %xmm5, %xmm9 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[1],xmm8[1],zero,zero +; AVX1-NEXT: vmovaps %xmm8, %xmm10 +; AVX1-NEXT: vmovaps %xmm3, %xmm8 +; AVX1-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; AVX1-NEXT: vmovaps %xmm4, %xmm5 +; AVX1-NEXT: vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[1],xmm3[1],zero,zero +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload +; AVX1-NEXT: vunpcklps {{.*#+}} xmm4 = xmm2[0],xmm14[0],xmm2[1],xmm14[1] +; AVX1-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm6[0],xmm7[0],xmm6[1],xmm7[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload +; AVX1-NEXT: vmovaps (%rsp), %xmm7 # 16-byte Reload +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[1],xmm6[1],zero,zero +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; AVX1-NEXT: vunpcklps {{.*#+}} 
xmm1 = xmm11[0],xmm12[0],xmm11[1],xmm12[1] +; AVX1-NEXT: vmovaps %xmm11, %xmm12 +; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm15[1],xmm13[1],zero,zero +; AVX1-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm9[2],xmm11[2] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm1 = xmm8[2],xmm10[2],xmm8[3],xmm10[3] +; AVX1-NEXT: vmovaps %xmm10, %xmm8 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm3[2],xmm5[3],xmm3[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = zero,zero,xmm2[2],xmm14[2] +; AVX1-NEXT: vmovaps %xmm2, %xmm10 +; AVX1-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm2[2],xmm3[2] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 +; AVX1-NEXT: vmovaps %xmm7, %xmm5 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm1 = xmm7[2],xmm6[2],xmm7[3],xmm6[3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload +; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = zero,zero,xmm12[2],xmm7[2] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm15[2],xmm13[2],xmm15[3],xmm13[3] +; AVX1-NEXT: vblendps {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm1 = xmm9[2],xmm11[2],xmm9[3],xmm11[3] +; AVX1-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm4 # 16-byte Folded Reload +; AVX1-NEXT: # xmm4 = xmm8[3,0],mem[3,0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm10[2],xmm14[2],xmm10[3],xmm14[3] +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload +; AVX1-NEXT: vshufps $51, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload +; AVX1-NEXT: # xmm6 = xmm6[3,0],mem[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3] +; AVX1-NEXT: vblendps {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload +; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm2[3,0],xmm5[3,0] +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm4 +; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm6 +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm12[2],xmm7[2],xmm12[3],xmm7[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm13[3,0],xmm15[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[2,0,2,3] +; AVX1-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1],xmm5[2,3] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = 
ymm2[0,1,2,3],ymm4[4,5,6,7] +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm3, 32(%rsi) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm3, (%rsi) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm3, 32(%rdx) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm3, (%rdx) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm3, 32(%rcx) +; AVX1-NEXT: vmovaps %ymm0, (%rcx) +; AVX1-NEXT: vmovaps %ymm1, 32(%r8) +; AVX1-NEXT: vmovaps %ymm2, (%r8) +; AVX1-NEXT: addq $216, %rsp +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: load_i32_stride4_vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: subq $104, %rsp +; AVX2-NEXT: vmovaps (%rdi), %ymm13 +; AVX2-NEXT: vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX2-NEXT: vmovaps 32(%rdi), %ymm9 +; AVX2-NEXT: vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX2-NEXT: vmovaps 64(%rdi), %ymm4 +; AVX2-NEXT: vmovaps 96(%rdi), %ymm5 +; AVX2-NEXT: vmovaps 160(%rdi), %ymm8 +; AVX2-NEXT: vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX2-NEXT: vmovaps 192(%rdi), %ymm3 +; AVX2-NEXT: vmovaps 224(%rdi), %ymm2 +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm0 = [17179869184,17179869184,17179869184,17179869184] +; AVX2-NEXT: vpermps %ymm2, %ymm0, %ymm1 +; AVX2-NEXT: vpermps %ymm3, %ymm0, %ymm6 +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vmovaps 144(%rdi), %xmm15 +; AVX2-NEXT: vmovaps 128(%rdi), %xmm10 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm10[0],xmm15[0],xmm10[1],xmm15[1] +; AVX2-NEXT: vmovaps {{.*#+}} xmm7 = +; AVX2-NEXT: vpermps %ymm8, %ymm7, %ymm8 +; AVX2-NEXT: vblendps {{.*#+}} xmm6 = xmm6[0,1],xmm8[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7] +; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX2-NEXT: vpermps %ymm5, %ymm0, %ymm1 +; AVX2-NEXT: vpermps %ymm4, %ymm0, %ymm0 +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vpermps %ymm9, %ymm7, %ymm1 +; AVX2-NEXT: vmovaps (%rdi), %xmm7 +; AVX2-NEXT: vmovaps 16(%rdi), %xmm8 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm8[0],xmm7[1],xmm8[1] +; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7] +; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm0 = [21474836481,21474836481,21474836481,21474836481] +; AVX2-NEXT: vpermps %ymm5, %ymm0, %ymm1 +; AVX2-NEXT: vpermps %ymm4, %ymm0, %ymm6 +; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm6[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vmovaps 32(%rdi), %xmm9 +; AVX2-NEXT: vmovaps 48(%rdi), %xmm6 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm1 = xmm9[0],xmm6[0],xmm9[1],xmm6[1] +; AVX2-NEXT: vmovaps {{.*#+}} xmm11 = <1,5,u,u> +; AVX2-NEXT: vpermps %ymm13, %ymm11, %ymm14 +; AVX2-NEXT: vblendps {{.*#+}} xmm1 = xmm14[0,1],xmm1[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm12[4,5,6,7] +; AVX2-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill +; AVX2-NEXT: vpermps %ymm2, %ymm0, %ymm1 +; AVX2-NEXT: vpermps %ymm3, %ymm0, %ymm0 +; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; AVX2-NEXT: vmovaps 128(%rdi), %ymm14 +; AVX2-NEXT: vpermps %ymm14, %ymm11, %ymm11 +; AVX2-NEXT: vmovaps 176(%rdi), %xmm1 +; AVX2-NEXT: vmovaps 160(%rdi), %xmm0 +; AVX2-NEXT: vunpcklps {{.*#+}} xmm13 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX2-NEXT: vblendps {{.*#+}} xmm11 = xmm11[0,1],xmm13[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm12[4,5,6,7] +; AVX2-NEXT: vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm11 = [25769803778,25769803778,25769803778,25769803778] +; AVX2-NEXT: vpermps %ymm2, %ymm11, %ymm13 +; AVX2-NEXT: vpermps %ymm3, %ymm11, %ymm12 +; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm13[6,7] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm10 = xmm10[2],xmm15[2],xmm10[3],xmm15[3] +; AVX2-NEXT: vmovaps {{.*#+}} xmm13 = +; AVX2-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm15 # 32-byte Folded Reload +; AVX2-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1],xmm15[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm12[4,5,6,7] +; AVX2-NEXT: vpermps %ymm5, %ymm11, %ymm12 +; AVX2-NEXT: vpermps %ymm4, %ymm11, %ymm11 +; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6,7] +; AVX2-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm12 # 32-byte Folded Reload +; AVX2-NEXT: vunpckhps {{.*#+}} xmm7 = xmm7[2],xmm8[2],xmm7[3],xmm8[3] +; AVX2-NEXT: vblendps {{.*#+}} xmm7 = xmm7[0,1],xmm12[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm11[4,5,6,7] +; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm8 = [30064771075,30064771075,30064771075,30064771075] +; AVX2-NEXT: vpermps %ymm5, %ymm8, %ymm5 +; AVX2-NEXT: vpermps %ymm4, %ymm8, %ymm4 +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm5[6,7] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm9[2],xmm6[2],xmm9[3],xmm6[3] +; AVX2-NEXT: vmovaps {{.*#+}} xmm6 = <3,7,u,u> +; AVX2-NEXT: vpermps {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm9 # 32-byte Folded Reload +; AVX2-NEXT: vblendps {{.*#+}} xmm5 = xmm9[0,1],xmm5[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7] +; AVX2-NEXT: vpermps %ymm2, %ymm8, %ymm2 +; AVX2-NEXT: vpermps %ymm3, %ymm8, %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7] +; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX2-NEXT: vpermps %ymm14, %ymm6, %ymm1 +; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] +; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm1, 32(%rsi) +; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm1, (%rsi) +; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm1, 32(%rdx) +; AVX2-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload +; AVX2-NEXT: vmovaps %ymm1, (%rdx) +; AVX2-NEXT: vmovaps %ymm10, 32(%rcx) +; AVX2-NEXT: vmovaps %ymm7, (%rcx) +; AVX2-NEXT: vmovaps %ymm0, 32(%r8) +; AVX2-NEXT: vmovaps %ymm4, (%r8) +; AVX2-NEXT: addq $104, %rsp +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: load_i32_stride4_vf16: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 +; AVX512-NEXT: vmovdqu64 128(%rdi), %zmm2 +; AVX512-NEXT: vmovdqu64 192(%rdi), %zmm3 +; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = [0,4,8,12,16,20,24,28,0,4,8,12,16,20,24,28] +; AVX512-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3] +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm5 +; AVX512-NEXT: vpermt2d %zmm3, %zmm4, %zmm5 +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7] +; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = [1,5,9,13,17,21,25,29,1,5,9,13,17,21,25,29] +; AVX512-NEXT: # zmm5 = 
mem[0,1,2,3,0,1,2,3] +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm6 +; AVX512-NEXT: vpermt2d %zmm3, %zmm5, %zmm6 +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5 +; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7] +; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm6 = [2,6,10,14,18,22,26,30,2,6,10,14,18,22,26,30] +; AVX512-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3] +; AVX512-NEXT: vmovdqa64 %zmm2, %zmm7 +; AVX512-NEXT: vpermt2d %zmm3, %zmm6, %zmm7 +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6 +; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm7[4,5,6,7] +; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [3,7,11,15,19,23,27,31,3,7,11,15,19,23,27,31] +; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3] +; AVX512-NEXT: vpermt2d %zmm3, %zmm7, %zmm2 +; AVX512-NEXT: vpermt2d %zmm1, %zmm7, %zmm0 +; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7] +; AVX512-NEXT: vmovdqu64 %zmm4, (%rsi) +; AVX512-NEXT: vmovdqu64 %zmm5, (%rdx) +; AVX512-NEXT: vmovdqu64 %zmm6, (%rcx) +; AVX512-NEXT: vmovdqu64 %zmm0, (%r8) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %wide.vec = load <64 x i32>, <64 x i32>* %in.vec, align 32 + + %strided.vec0 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> + %strided.vec1 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> + %strided.vec2 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> + %strided.vec3 = shufflevector <64 x i32> %wide.vec, <64 x i32> poison, <16 x i32> + + store <16 x i32> %strided.vec0, <16 x i32>* %out.vec0, align 32 + store <16 x i32> %strided.vec1, <16 x i32>* %out.vec1, align 32 + store <16 x i32> %strided.vec2, <16 x i32>* %out.vec2, align 32 + store <16 x i32> %strided.vec3, <16 x i32>* %out.vec3, align 32 + + ret void +} diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll new file mode 100644 index 0000000..71ec3e1 --- /dev/null +++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll @@ -0,0 +1,692 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX1 +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2,AVX2-SLOW +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2,AVX2-FAST-ALL +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX2,AVX2-FAST-PERLANE +; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512 + +; These patterns are produced by LoopVectorizer for interleaved stores. 
+ +define void @store_i32_stride4_vf2(<2 x i32>* %in.vecptr0, <2 x i32>* %in.vecptr1, <2 x i32>* %in.vecptr2, <2 x i32>* %in.vecptr3, <8 x i32>* %out.vec) nounwind { +; SSE-LABEL: store_i32_stride4_vf2: +; SSE: # %bb.0: +; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero +; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero +; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2] +; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3] +; SSE-NEXT: movaps %xmm0, 16(%r8) +; SSE-NEXT: movaps %xmm2, (%r8) +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i32_stride4_vf2: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero +; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,3,0,2,5,7,4,6] +; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm2[0,2,1,3,4,6,5,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7] +; AVX1-NEXT: vmovaps %ymm0, (%r8) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-SLOW-LABEL: store_i32_stride4_vf2: +; AVX2-SLOW: # %bb.0: +; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero +; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7] +; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX2-SLOW-NEXT: vmovaps %ymm0, (%r8) +; AVX2-SLOW-NEXT: vzeroupper +; AVX2-SLOW-NEXT: retq +; +; AVX2-FAST-ALL-LABEL: store_i32_stride4_vf2: +; AVX2-FAST-ALL: # %bb.0: +; AVX2-FAST-ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX2-FAST-ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-FAST-ALL-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX2-FAST-ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-FAST-ALL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero +; AVX2-FAST-ALL-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-FAST-ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7] +; AVX2-FAST-ALL-NEXT: vpermps %ymm0, %ymm1, %ymm0 +; AVX2-FAST-ALL-NEXT: vmovaps %ymm0, (%r8) +; AVX2-FAST-ALL-NEXT: vzeroupper +; AVX2-FAST-ALL-NEXT: retq +; +; AVX2-FAST-PERLANE-LABEL: store_i32_stride4_vf2: +; AVX2-FAST-PERLANE: # %bb.0: +; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX2-FAST-PERLANE-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero +; AVX2-FAST-PERLANE-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-FAST-PERLANE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7] +; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] 
+; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, (%r8) +; AVX2-FAST-PERLANE-NEXT: vzeroupper +; AVX2-FAST-PERLANE-NEXT: retq +; +; AVX512-LABEL: store_i32_stride4_vf2: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero +; AVX512-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX512-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7] +; AVX512-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512-NEXT: vmovaps %ymm0, (%r8) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %in.vec0 = load <2 x i32>, <2 x i32>* %in.vecptr0, align 32 + %in.vec1 = load <2 x i32>, <2 x i32>* %in.vecptr1, align 32 + %in.vec2 = load <2 x i32>, <2 x i32>* %in.vecptr2, align 32 + %in.vec3 = load <2 x i32>, <2 x i32>* %in.vecptr3, align 32 + + %concat01 = shufflevector <2 x i32> %in.vec0, <2 x i32> %in.vec1, <4 x i32> + %concat23 = shufflevector <2 x i32> %in.vec2, <2 x i32> %in.vec3, <4 x i32> + %concat0123 = shufflevector <4 x i32> %concat01, <4 x i32> %concat23, <8 x i32> + %interleaved.vec = shufflevector <8 x i32> %concat0123, <8 x i32> poison, <8 x i32> + + store <8 x i32> %interleaved.vec, <8 x i32>* %out.vec, align 32 + + ret void +} + +define void @store_i32_stride4_vf4(<4 x i32>* %in.vecptr0, <4 x i32>* %in.vecptr1, <4 x i32>* %in.vecptr2, <4 x i32>* %in.vecptr3, <16 x i32>* %out.vec) nounwind { +; SSE-LABEL: store_i32_stride4_vf4: +; SSE: # %bb.0: +; SSE-NEXT: movaps (%rdi), %xmm0 +; SSE-NEXT: movaps (%rsi), %xmm1 +; SSE-NEXT: movaps (%rdx), %xmm2 +; SSE-NEXT: movaps (%rcx), %xmm3 +; SSE-NEXT: movaps %xmm2, %xmm4 +; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; SSE-NEXT: movaps %xmm0, %xmm5 +; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1] +; SSE-NEXT: movaps %xmm5, %xmm6 +; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm4[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm4[1] +; SSE-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; SSE-NEXT: movaps %xmm0, %xmm1 +; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] +; SSE-NEXT: movaps %xmm0, 48(%r8) +; SSE-NEXT: movaps %xmm1, 32(%r8) +; SSE-NEXT: movaps %xmm5, 16(%r8) +; SSE-NEXT: movaps %xmm6, (%r8) +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i32_stride4_vf4: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovaps (%rdi), %xmm0 +; AVX1-NEXT: vmovaps (%rsi), %xmm1 +; AVX1-NEXT: vmovaps (%rdx), %xmm2 +; AVX1-NEXT: vmovaps (%rcx), %xmm3 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm4 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm5 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 +; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = ymm2[0,1,1,0,4,5,5,4] +; AVX1-NEXT: vmovddup {{.*#+}} ymm6 = ymm5[0,0,2,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm0[1,0,2,3,5,4,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7] +; AVX1-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[0,1,3,2,4,5,7,6] +; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7] +; AVX1-NEXT: vpermilps 
{{.*#+}} ymm0 = ymm0[3,2,2,3,7,6,6,7] +; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = ymm4[1,0,3,2] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7] +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7] +; AVX1-NEXT: vmovaps %ymm0, 32(%r8) +; AVX1-NEXT: vmovaps %ymm1, (%r8) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: store_i32_stride4_vf4: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rdi), %xmm0 +; AVX2-NEXT: vmovaps (%rdx), %xmm1 +; AVX2-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0 +; AVX2-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1 +; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = +; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm2 +; AVX2-NEXT: vmovaps {{.*#+}} ymm3 = <0,4,u,u,1,5,u,u> +; AVX2-NEXT: vpermps %ymm0, %ymm3, %ymm3 +; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7] +; AVX2-NEXT: vmovaps {{.*#+}} ymm3 = +; AVX2-NEXT: vpermps %ymm1, %ymm3, %ymm1 +; AVX2-NEXT: vmovaps {{.*#+}} ymm3 = <2,6,u,u,3,7,u,u> +; AVX2-NEXT: vpermps %ymm0, %ymm3, %ymm0 +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] +; AVX2-NEXT: vmovaps %ymm0, 32(%r8) +; AVX2-NEXT: vmovaps %ymm2, (%r8) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: store_i32_stride4_vf4: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovaps (%rdi), %xmm0 +; AVX512-NEXT: vmovaps (%rdx), %xmm1 +; AVX512-NEXT: vinsertf128 $1, (%rcx), %ymm1, %ymm1 +; AVX512-NEXT: vinsertf128 $1, (%rsi), %ymm0, %ymm0 +; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15] +; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0 +; AVX512-NEXT: vmovups %zmm0, (%r8) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %in.vec0 = load <4 x i32>, <4 x i32>* %in.vecptr0, align 32 + %in.vec1 = load <4 x i32>, <4 x i32>* %in.vecptr1, align 32 + %in.vec2 = load <4 x i32>, <4 x i32>* %in.vecptr2, align 32 + %in.vec3 = load <4 x i32>, <4 x i32>* %in.vecptr3, align 32 + + %concat01 = shufflevector <4 x i32> %in.vec0, <4 x i32> %in.vec1, <8 x i32> + %concat23 = shufflevector <4 x i32> %in.vec2, <4 x i32> %in.vec3, <8 x i32> + %concat0123 = shufflevector <8 x i32> %concat01, <8 x i32> %concat23, <16 x i32> + %interleaved.vec = shufflevector <16 x i32> %concat0123, <16 x i32> poison, <16 x i32> + + store <16 x i32> %interleaved.vec, <16 x i32>* %out.vec, align 32 + + ret void +} + +define void @store_i32_stride4_vf8(<8 x i32>* %in.vecptr0, <8 x i32>* %in.vecptr1, <8 x i32>* %in.vecptr2, <8 x i32>* %in.vecptr3, <32 x i32>* %out.vec) nounwind { +; SSE-LABEL: store_i32_stride4_vf8: +; SSE: # %bb.0: +; SSE-NEXT: movaps (%rdi), %xmm0 +; SSE-NEXT: movaps 16(%rdi), %xmm1 +; SSE-NEXT: movaps (%rsi), %xmm5 +; SSE-NEXT: movaps 16(%rsi), %xmm8 +; SSE-NEXT: movaps (%rdx), %xmm3 +; SSE-NEXT: movaps 16(%rdx), %xmm4 +; SSE-NEXT: movaps (%rcx), %xmm6 +; SSE-NEXT: movaps 16(%rcx), %xmm9 +; SSE-NEXT: movaps %xmm3, %xmm7 +; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] +; SSE-NEXT: movaps %xmm2, %xmm10 +; SSE-NEXT: unpckhpd {{.*#+}} xmm10 = xmm10[1],xmm7[1] +; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0] +; SSE-NEXT: unpckhps {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3] +; SSE-NEXT: movaps %xmm0, %xmm5 +; SSE-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm3[1] +; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0] +; SSE-NEXT: 
movaps %xmm4, %xmm3 +; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1] +; SSE-NEXT: movaps %xmm1, %xmm6 +; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1] +; SSE-NEXT: movaps %xmm6, %xmm7 +; SSE-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm3[1] +; SSE-NEXT: movlhps {{.*#+}} xmm6 = xmm6[0],xmm3[0] +; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm8[2],xmm1[3],xmm8[3] +; SSE-NEXT: movaps %xmm1, %xmm3 +; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1] +; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm4[0] +; SSE-NEXT: movaps %xmm1, 96(%r8) +; SSE-NEXT: movaps %xmm3, 112(%r8) +; SSE-NEXT: movaps %xmm6, 64(%r8) +; SSE-NEXT: movaps %xmm7, 80(%r8) +; SSE-NEXT: movaps %xmm0, 32(%r8) +; SSE-NEXT: movaps %xmm5, 48(%r8) +; SSE-NEXT: movaps %xmm2, (%r8) +; SSE-NEXT: movaps %xmm10, 16(%r8) +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i32_stride4_vf8: +; AVX1: # %bb.0: +; AVX1-NEXT: vmovaps (%rdi), %xmm2 +; AVX1-NEXT: vmovaps 16(%rdi), %xmm10 +; AVX1-NEXT: vmovaps (%rsi), %xmm4 +; AVX1-NEXT: vmovaps 16(%rsi), %xmm1 +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm10[1],xmm1[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm5 = xmm10[0],xmm1[0],xmm10[1],xmm1[1] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm8 +; AVX1-NEXT: vmovaps (%rcx), %xmm5 +; AVX1-NEXT: vmovaps 16(%rcx), %xmm6 +; AVX1-NEXT: vmovaps (%rdx), %xmm7 +; AVX1-NEXT: vmovaps 16(%rdx), %xmm3 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm9 = xmm3[0],xmm6[0],xmm3[1],xmm6[1] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm6[0],xmm3[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm0[2,3],ymm8[4,5],ymm0[6,7] +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm4[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm9 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm9, %ymm9 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm11 = xmm7[0],xmm5[0],xmm7[1],xmm5[1] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm7[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm11, %ymm0, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm0[2,3],ymm9[4,5],ymm0[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm7[2],xmm5[2],xmm7[3],xmm5[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm7[2],xmm5[2] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm0 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm2[2],xmm4[2],xmm2[3],xmm4[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm4[3,0],xmm2[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm6[2],xmm3[3],xmm6[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = zero,zero,xmm3[2],xmm6[2] +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm3 = xmm10[2],xmm1[2],xmm10[3],xmm1[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,0],xmm10[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7] +; AVX1-NEXT: vmovaps %ymm1, 96(%r8) +; AVX1-NEXT: vmovaps %ymm0, 32(%r8) +; AVX1-NEXT: vmovaps %ymm9, (%r8) +; AVX1-NEXT: vmovaps %ymm8, 64(%r8) +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: store_i32_stride4_vf8: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps (%rdi), 
%ymm8 +; AVX2-NEXT: vmovaps (%rsi), %ymm1 +; AVX2-NEXT: vmovaps (%rdx), %ymm2 +; AVX2-NEXT: vmovaps (%rcx), %ymm3 +; AVX2-NEXT: vmovaps (%rcx), %xmm4 +; AVX2-NEXT: vmovaps (%rdx), %xmm5 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm6 = xmm5[2],xmm4[2],xmm5[3],xmm4[3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,0,2,1] +; AVX2-NEXT: vmovaps (%rsi), %xmm7 +; AVX2-NEXT: vmovaps (%rdi), %xmm0 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm9 = xmm0[2],xmm7[2],xmm0[3],xmm7[3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] +; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,0,2,1] +; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1] +; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5],ymm4[6,7] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5] +; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3] +; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm8[0],ymm1[0],ymm8[1],ymm1[1],ymm8[4],ymm1[4],ymm8[5],ymm1[5] +; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[6],ymm3[6],ymm2[7],ymm3[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3] +; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm8[2],ymm1[2],ymm8[3],ymm1[3],ymm8[6],ymm1[6],ymm8[7],ymm1[7] +; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,1,3,3] +; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7] +; AVX2-NEXT: vmovaps %ymm1, 96(%r8) +; AVX2-NEXT: vmovaps %ymm4, 64(%r8) +; AVX2-NEXT: vmovaps %ymm0, (%r8) +; AVX2-NEXT: vmovaps %ymm6, 32(%r8) +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: store_i32_stride4_vf8: +; AVX512: # %bb.0: +; AVX512-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512-NEXT: vmovdqa (%rdx), %ymm1 +; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0 +; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqu64 %zmm3, 64(%r8) +; AVX512-NEXT: vmovdqu64 %zmm2, (%r8) +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %in.vec0 = load <8 x i32>, <8 x i32>* %in.vecptr0, align 32 + %in.vec1 = load <8 x i32>, <8 x i32>* %in.vecptr1, align 32 + %in.vec2 = load <8 x i32>, <8 x i32>* %in.vecptr2, align 32 + %in.vec3 = load <8 x i32>, <8 x i32>* %in.vecptr3, align 32 + + %concat01 = shufflevector <8 x i32> %in.vec0, <8 x i32> %in.vec1, <16 x i32> + %concat23 = shufflevector <8 x i32> %in.vec2, <8 x i32> %in.vec3, <16 x i32> + %concat0123 = shufflevector <16 x i32> %concat01, <16 x i32> %concat23, <32 x i32> + %interleaved.vec = shufflevector <32 x i32> %concat0123, <32 x i32> poison, <32 x i32> + + store <32 x i32> %interleaved.vec, <32 x i32>* %out.vec, align 32 + + ret void +} + +define void @store_i32_stride4_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vecptr1, <16 x i32>* %in.vecptr2, <16 x i32>* %in.vecptr3, <64 x i32>* %out.vec) nounwind { +; SSE-LABEL: store_i32_stride4_vf16: +; SSE: # %bb.0: +; SSE-NEXT: movaps (%rdi), %xmm10 +; SSE-NEXT: movaps 16(%rdi), %xmm13 +; SSE-NEXT: movaps 32(%rdi), %xmm8 +; SSE-NEXT: movaps 
48(%rdi), %xmm4 +; SSE-NEXT: movaps (%rsi), %xmm3 +; SSE-NEXT: movaps 16(%rsi), %xmm1 +; SSE-NEXT: movaps 32(%rsi), %xmm9 +; SSE-NEXT: movaps (%rdx), %xmm0 +; SSE-NEXT: movaps 16(%rdx), %xmm5 +; SSE-NEXT: movaps 32(%rdx), %xmm6 +; SSE-NEXT: movaps (%rcx), %xmm11 +; SSE-NEXT: movaps 16(%rcx), %xmm14 +; SSE-NEXT: movaps 32(%rcx), %xmm12 +; SSE-NEXT: movaps %xmm0, %xmm7 +; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1] +; SSE-NEXT: movaps %xmm10, %xmm15 +; SSE-NEXT: unpcklps {{.*#+}} xmm15 = xmm15[0],xmm3[0],xmm15[1],xmm3[1] +; SSE-NEXT: movaps %xmm15, %xmm2 +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm7[1] +; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movlhps {{.*#+}} xmm15 = xmm15[0],xmm7[0] +; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm11[2],xmm0[3],xmm11[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm10 = xmm10[2],xmm3[2],xmm10[3],xmm3[3] +; SSE-NEXT: movaps %xmm10, %xmm2 +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] +; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movlhps {{.*#+}} xmm10 = xmm10[0],xmm0[0] +; SSE-NEXT: movaps %xmm5, %xmm0 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1] +; SSE-NEXT: movaps %xmm13, %xmm7 +; SSE-NEXT: unpcklps {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1] +; SSE-NEXT: movaps %xmm7, %xmm2 +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] +; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; SSE-NEXT: movlhps {{.*#+}} xmm7 = xmm7[0],xmm0[0] +; SSE-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm14[2],xmm5[3],xmm14[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm13 = xmm13[2],xmm1[2],xmm13[3],xmm1[3] +; SSE-NEXT: movaps %xmm13, %xmm11 +; SSE-NEXT: unpckhpd {{.*#+}} xmm11 = xmm11[1],xmm5[1] +; SSE-NEXT: movlhps {{.*#+}} xmm13 = xmm13[0],xmm5[0] +; SSE-NEXT: movaps %xmm6, %xmm0 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1] +; SSE-NEXT: movaps %xmm8, %xmm5 +; SSE-NEXT: unpcklps {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1] +; SSE-NEXT: movaps %xmm5, %xmm14 +; SSE-NEXT: unpckhpd {{.*#+}} xmm14 = xmm14[1],xmm0[1] +; SSE-NEXT: movlhps {{.*#+}} xmm5 = xmm5[0],xmm0[0] +; SSE-NEXT: movaps 48(%rdx), %xmm0 +; SSE-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm12[2],xmm6[3],xmm12[3] +; SSE-NEXT: movaps 48(%rcx), %xmm12 +; SSE-NEXT: unpckhps {{.*#+}} xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3] +; SSE-NEXT: movaps %xmm8, %xmm9 +; SSE-NEXT: unpckhpd {{.*#+}} xmm9 = xmm9[1],xmm6[1] +; SSE-NEXT: movlhps {{.*#+}} xmm8 = xmm8[0],xmm6[0] +; SSE-NEXT: movaps %xmm0, %xmm6 +; SSE-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1] +; SSE-NEXT: movaps 48(%rsi), %xmm2 +; SSE-NEXT: movaps %xmm4, %xmm3 +; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] +; SSE-NEXT: movaps %xmm3, %xmm1 +; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1] +; SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm6[0] +; SSE-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3] +; SSE-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3] +; SSE-NEXT: movaps %xmm4, %xmm2 +; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1] +; SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm0[0] +; SSE-NEXT: movaps %xmm4, 224(%r8) +; SSE-NEXT: movaps %xmm2, 240(%r8) +; SSE-NEXT: movaps %xmm3, 192(%r8) +; SSE-NEXT: movaps %xmm1, 208(%r8) +; SSE-NEXT: movaps %xmm8, 160(%r8) +; SSE-NEXT: movaps %xmm9, 176(%r8) +; SSE-NEXT: movaps %xmm5, 128(%r8) +; SSE-NEXT: movaps %xmm14, 144(%r8) +; SSE-NEXT: movaps %xmm13, 96(%r8) +; SSE-NEXT: movaps %xmm11, 
112(%r8) +; SSE-NEXT: movaps %xmm7, 64(%r8) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 80(%r8) +; SSE-NEXT: movaps %xmm10, 32(%r8) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 48(%r8) +; SSE-NEXT: movaps %xmm15, (%r8) +; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload +; SSE-NEXT: movaps %xmm0, 16(%r8) +; SSE-NEXT: retq +; +; AVX1-LABEL: store_i32_stride4_vf16: +; AVX1: # %bb.0: +; AVX1-NEXT: subq $24, %rsp +; AVX1-NEXT: vmovaps 16(%rdi), %xmm2 +; AVX1-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps 32(%rdi), %xmm13 +; AVX1-NEXT: vmovaps 48(%rdi), %xmm11 +; AVX1-NEXT: vmovaps 16(%rsi), %xmm1 +; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps 32(%rsi), %xmm8 +; AVX1-NEXT: vmovaps 48(%rsi), %xmm9 +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm10 +; AVX1-NEXT: vmovaps 16(%rcx), %xmm0 +; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; AVX1-NEXT: vmovaps 32(%rcx), %xmm3 +; AVX1-NEXT: vmovaps 48(%rcx), %xmm6 +; AVX1-NEXT: vmovaps 16(%rdx), %xmm15 +; AVX1-NEXT: vmovaps 32(%rdx), %xmm4 +; AVX1-NEXT: vmovaps 48(%rdx), %xmm5 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm15[0],xmm0[0],xmm15[1],xmm0[1] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm15[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3],ymm10[4,5],ymm0[6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm13[1],xmm8[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm13[0],xmm8[0],xmm13[1],xmm8[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] +; AVX1-NEXT: vmovlhps {{.*#+}} xmm7 = xmm3[0],xmm4[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm7 = xmm7[0,1,2,0] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm7, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm11[1],xmm9[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm9[0],xmm11[1],xmm9[1] +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm6[0],xmm5[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,2,0] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm7 = xmm5[0],xmm6[0],xmm5[1],xmm6[1] +; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm1, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] +; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; AVX1-NEXT: vmovaps (%rdi), %xmm2 +; AVX1-NEXT: vmovaps (%rsi), %xmm1 +; AVX1-NEXT: vinsertps {{.*#+}} xmm7 = xmm2[1],xmm1[1],zero,zero +; AVX1-NEXT: vunpcklps {{.*#+}} xmm10 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm10, %ymm10 +; AVX1-NEXT: vmovaps (%rcx), %xmm7 +; AVX1-NEXT: vmovaps (%rdx), %xmm0 +; AVX1-NEXT: vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm0[0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm12 = xmm12[0,1,2,0] +; AVX1-NEXT: vunpcklps {{.*#+}} xmm14 = xmm0[0],xmm7[0],xmm0[1],xmm7[1] +; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm12, %ymm12 +; AVX1-NEXT: vblendps {{.*#+}} ymm10 = 
ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm12 = xmm4[2],xmm3[2],xmm4[3],xmm3[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = zero,zero,xmm4[2],xmm3[2] +; AVX1-NEXT: vinsertf128 $1, %xmm12, %ymm3, %ymm12 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm13[2],xmm8[2],xmm13[3],xmm8[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm8[3,0],xmm13[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 +; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm12[2,3],ymm3[4,5],ymm12[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm6[2],xmm5[3],xmm6[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm5[2],xmm6[2] +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm11[2],xmm9[2],xmm11[3],xmm9[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm9[3,0],xmm11[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5 +; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] +; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm7[2],xmm0[3],xmm7[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm7[2] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0 +; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm5, %ymm1 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7] +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload +; AVX1-NEXT: vunpckhps {{.*#+}} xmm1 = xmm15[2],xmm2[2],xmm15[3],xmm2[3] +; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = zero,zero,xmm15[2],xmm2[2] +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload +; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload +; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm6[2],xmm5[2],xmm6[3],xmm5[3] +; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,0],xmm6[3,0] +; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[2,0,2,3] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2 +; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7] +; AVX1-NEXT: vmovaps %ymm1, 96(%r8) +; AVX1-NEXT: vmovaps %ymm0, 32(%r8) +; AVX1-NEXT: vmovaps %ymm4, 224(%r8) +; AVX1-NEXT: vmovaps %ymm3, 160(%r8) +; AVX1-NEXT: vmovaps %ymm10, (%r8) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm0, 192(%r8) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm0, 128(%r8) +; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload +; AVX1-NEXT: vmovaps %ymm0, 64(%r8) +; AVX1-NEXT: addq $24, %rsp +; AVX1-NEXT: vzeroupper +; AVX1-NEXT: retq +; +; AVX2-LABEL: store_i32_stride4_vf16: +; AVX2: # %bb.0: +; AVX2-NEXT: vmovaps 32(%rdi), %ymm13 +; AVX2-NEXT: vmovaps (%rdi), %ymm11 +; AVX2-NEXT: vmovaps 32(%rsi), %ymm14 +; AVX2-NEXT: vmovaps (%rsi), %ymm12 +; AVX2-NEXT: vmovaps 32(%rdx), %ymm5 +; AVX2-NEXT: vmovaps (%rdx), %ymm15 +; AVX2-NEXT: vmovaps 32(%rcx), %ymm7 +; AVX2-NEXT: vmovaps (%rcx), %xmm6 +; AVX2-NEXT: vmovaps 32(%rcx), %xmm0 +; AVX2-NEXT: vmovaps (%rdx), %xmm1 +; AVX2-NEXT: vmovaps 32(%rdx), %xmm2 +; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm2[2],xmm0[2],xmm2[3],xmm0[3] +; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1] +; AVX2-NEXT: vmovaps 32(%rsi), %xmm3 +; AVX2-NEXT: vmovaps 32(%rdi), %xmm4 +; AVX2-NEXT: 
vunpckhps {{.*#+}} xmm9 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
+; AVX2-NEXT: vmovaps (%rsi), %xmm8
+; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX2-NEXT: vmovaps (%rdi), %xmm2
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7]
+; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,1,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
+; AVX2-NEXT: vmovaps (%rcx), %ymm3
+; AVX2-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,1,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
+; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm15[0],ymm3[0],ymm15[1],ymm3[1],ymm15[4],ymm3[4],ymm15[5],ymm3[5]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[4],ymm12[4],ymm11[5],ymm12[5]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
+; AVX2-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
+; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
+; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
+; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX2-NEXT: vunpckhps {{.*#+}} ymm3 = ymm15[2],ymm3[2],ymm15[3],ymm3[3],ymm15[6],ymm3[6],ymm15[7],ymm3[7]
+; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,3]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5],ymm3[6,7]
+; AVX2-NEXT: vmovaps %ymm3, 96(%r8)
+; AVX2-NEXT: vmovaps %ymm5, 192(%r8)
+; AVX2-NEXT: vmovaps %ymm4, 224(%r8)
+; AVX2-NEXT: vmovaps %ymm2, 64(%r8)
+; AVX2-NEXT: vmovaps %ymm1, (%r8)
+; AVX2-NEXT: vmovaps %ymm9, 32(%r8)
+; AVX2-NEXT: vmovaps %ymm0, 128(%r8)
+; AVX2-NEXT: vmovaps %ymm10, 160(%r8)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: store_i32_stride4_vf16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512-NEXT: vmovdqu64 (%rsi), %zmm1
+; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2
+; AVX512-NEXT: vmovdqu64 (%rcx), %zmm3
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = <u,u,0,16,u,u,1,17,u,u,2,18,u,u,3,19>
+; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
+; AVX512-NEXT: 
vmovdqa64 {{.*#+}} zmm5 = <0,16,u,u,1,17,u,u,2,18,u,u,3,19,u,u>
+; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5
+; AVX512-NEXT: movb $-86, %al
+; AVX512-NEXT: kmovd %eax, %k1
+; AVX512-NEXT: vmovdqa64 %zmm4, %zmm5 {%k1}
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = <u,u,4,20,u,u,5,21,u,u,6,22,u,u,7,23>
+; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm6 = <4,20,u,u,5,21,u,u,6,22,u,u,7,23,u,u>
+; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6
+; AVX512-NEXT: vmovdqa64 %zmm4, %zmm6 {%k1}
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = <u,u,8,24,u,u,9,25,u,u,10,26,u,u,11,27>
+; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = <8,24,u,u,9,25,u,u,10,26,u,u,11,27,u,u>
+; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm7
+; AVX512-NEXT: vmovdqa64 %zmm4, %zmm7 {%k1}
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = <u,u,12,28,u,u,13,29,u,u,14,30,u,u,15,31>
+; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = <12,28,u,u,13,29,u,u,14,30,u,u,15,31,u,u>
+; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512-NEXT: vmovdqa64 %zmm4, %zmm2 {%k1}
+; AVX512-NEXT: vmovdqu64 %zmm2, 192(%r8)
+; AVX512-NEXT: vmovdqu64 %zmm7, 128(%r8)
+; AVX512-NEXT: vmovdqu64 %zmm6, 64(%r8)
+; AVX512-NEXT: vmovdqu64 %zmm5, (%r8)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %in.vec0 = load <16 x i32>, <16 x i32>* %in.vecptr0, align 32
+ %in.vec1 = load <16 x i32>, <16 x i32>* %in.vecptr1, align 32
+ %in.vec2 = load <16 x i32>, <16 x i32>* %in.vecptr2, align 32
+ %in.vec3 = load <16 x i32>, <16 x i32>* %in.vecptr3, align 32
+
+ %concat01 = shufflevector <16 x i32> %in.vec0, <16 x i32> %in.vec1, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %concat23 = shufflevector <16 x i32> %in.vec2, <16 x i32> %in.vec3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %concat0123 = shufflevector <32 x i32> %concat01, <32 x i32> %concat23, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %interleaved.vec = shufflevector <64 x i32> %concat0123, <64 x i32> poison, <64 x i32> <i32 0, i32 16, i32 32, i32 48, i32 1, i32 17, i32 33, i32 49, i32 2, i32 18, i32 34, i32 50, i32 3, i32 19, i32 35, i32 51, i32 4, i32 20, i32 36, i32 52, i32 5, i32 21, i32 37, i32 53, i32 6, i32 22, i32 38, i32 54, i32 7, i32 23, i32 39, i32 55, i32 8, i32 24, i32 40, i32 56, i32 9, i32 25, i32 41, i32 57, i32 10, i32 26, i32 42, i32 58, i32 11, i32 27, i32 43, i32 59, i32 12, i32 28, i32 44, i32 60, i32 13, i32 29, i32 45, i32 61, i32 14, i32 30, i32 46, i32 62, i32 15, i32 31, i32 47, i32 63>
+
+ store <64 x i32> %interleaved.vec, <64 x i32>* %out.vec, align 32
+
+ ret void
+}
-- 
2.7.4
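
For reference, the interleaved-store IR exercised by these tests is the shape the loop vectorizer forms for a scalar stride-4 store loop. A minimal C sketch of such a loop follows; the function and parameter names are illustrative only and are not part of the patch:

    /* Stride-4 interleaved store: each iteration writes one element from
       each of four input streams to four consecutive i32 slots of out. */
    void store_i32_stride4(const int *a, const int *b, const int *c,
                           const int *d, int *out, int n) {
      for (int i = 0; i < n; ++i) {
        out[4 * i + 0] = a[i];
        out[4 * i + 1] = b[i];
        out[4 * i + 2] = c[i];
        out[4 * i + 3] = d[i];
      }
    }

With a vectorization factor of 16, the four per-stream vectors are concatenated and shuffled into a single interleaved vector, as in %concat0123 and %interleaved.vec of store_i32_stride4_vf16 above.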