; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512BW
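+
+; Prefix layering for the RUN lines above: ALL is shared by every run; SSE is
+; shared by the SSE2 and SSE41 runs; AVX is shared by all AVX/AVX512 runs; and
+; AVX512 is shared by the AVX512F and AVX512BW runs.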
define <16 x i8> @undef_index(i8 %x) nounwind {
; ALL-LABEL: undef_index:
; ALL: # %bb.0:
; ALL-NEXT: retq
%ins = insertelement <16 x i8> undef, i8 %x, i32 undef
ret <16 x i8> %ins
}
-define <16 x i8> @arg_i8_v16i8(i8 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i8_v16i8:
-; SSE: # %bb.0:
-; SSE-NEXT: movd %edi, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: pshufb %xmm1, %xmm0
-; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i8_v16i8:
+; Insertion into undef vectors
+;
+
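+; Since the source vector is undef, every lane other than the inserted one is
+; undef too, so the variable-index insert can lower to a splat of the scalar:
+; shuffle splats on SSE/AVX1, vpbroadcast*/vbroadcasts* on AVX2, and
+; GPR-source broadcasts on AVX512 (dword/qword with AVX512F+VL, byte/word
+; with AVX512BW+VL).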
+define <16 x i8> @arg_i8_v16i8_undef(i8 %x, i32 %y) nounwind {
+; SSE2-LABEL: arg_i8_v16i8_undef:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: arg_i8_v16i8_undef:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pshufb %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: arg_i8_v16i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i8_v16i8:
+; AVX2-LABEL: arg_i8_v16i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i8_v16i8_undef:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovd %edi, %xmm0
+; AVX512F-NEXT: vpbroadcastb %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i8_v16i8_undef:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastb %edi, %xmm0
+; AVX512BW-NEXT: retq
%ins = insertelement <16 x i8> undef, i8 %x, i32 %y
ret <16 x i8> %ins
}
-define <8 x i16> @arg_i16_v8i16(i16 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i16_v8i16:
+define <8 x i16> @arg_i16_v8i16_undef(i16 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i16_v8i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i16_v8i16:
+; AVX1-LABEL: arg_i16_v8i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i16_v8i16:
+; AVX2-LABEL: arg_i16_v8i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i16_v8i16_undef:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovd %edi, %xmm0
+; AVX512F-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i16_v8i16_undef:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastw %edi, %xmm0
+; AVX512BW-NEXT: retq
%ins = insertelement <8 x i16> undef, i16 %x, i32 %y
ret <8 x i16> %ins
}
-define <4 x i32> @arg_i32_v4i32(i32 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i32_v4i32:
+define <4 x i32> @arg_i32_v4i32_undef(i32 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i32_v4i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i32_v4i32:
+; AVX1-LABEL: arg_i32_v4i32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i32_v4i32:
+; AVX2-LABEL: arg_i32_v4i32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: arg_i32_v4i32_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %edi, %xmm0
+; AVX512-NEXT: retq
%ins = insertelement <4 x i32> undef, i32 %x, i32 %y
ret <4 x i32> %ins
}
-define <2 x i64> @arg_i64_v2i64(i64 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i64_v2i64:
+define <2 x i64> @arg_i64_v2i64_undef(i64 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i64_v2i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movq %rdi, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i64_v2i64:
+; AVX1-LABEL: arg_i64_v2i64_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i64_v2i64:
+; AVX2-LABEL: arg_i64_v2i64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: arg_i64_v2i64_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastq %rdi, %xmm0
+; AVX512-NEXT: retq
%ins = insertelement <2 x i64> undef, i64 %x, i32 %y
ret <2 x i64> %ins
}
-define <4 x float> @arg_f32_v4f32(float %x, i32 %y) nounwind {
-; SSE-LABEL: arg_f32_v4f32:
+define <4 x float> @arg_f32_v4f32_undef(float %x, i32 %y) nounwind {
+; SSE-LABEL: arg_f32_v4f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_f32_v4f32:
+; AVX1-LABEL: arg_f32_v4f32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_f32_v4f32:
+; AVX2-LABEL: arg_f32_v4f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm0, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: arg_f32_v4f32_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vbroadcastss %xmm0, %xmm0
+; AVX512-NEXT: retq
%ins = insertelement <4 x float> undef, float %x, i32 %y
ret <4 x float> %ins
}
-define <2 x double> @arg_f64_v2f64(double %x, i32 %y) nounwind {
-; SSE-LABEL: arg_f64_v2f64:
-; SSE: # %bb.0:
-; SSE-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
-; SSE-NEXT: retq
+define <2 x double> @arg_f64_v2f64_undef(double %x, i32 %y) nounwind {
+; SSE2-LABEL: arg_f64_v2f64_undef:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: arg_f64_v2f64:
+; SSE41-LABEL: arg_f64_v2f64_undef:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = xmm0[0,0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: arg_f64_v2f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: retq
%ins = insertelement <2 x double> undef, double %x, i32 %y
ret <2 x double> %ins
}
-define <16 x i8> @load_i8_v16i8(i8* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i8_v16i8:
-; SSE: # %bb.0:
-; SSE-NEXT: movzbl (%rdi), %eax
-; SSE-NEXT: movd %eax, %xmm0
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: pshufb %xmm1, %xmm0
-; SSE-NEXT: retq
+define <16 x i8> @load_i8_v16i8_undef(i8* %p, i32 %y) nounwind {
+; SSE2-LABEL: load_i8_v16i8_undef:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movzbl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE2-NEXT: retq
;
-; AVX1-LABEL: load_i8_v16i8:
+; SSE41-LABEL: load_i8_v16i8_undef:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movzbl (%rdi), %eax
+; SSE41-NEXT: movd %eax, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pshufb %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: load_i8_v16i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: load_i8_v16i8:
+; AVX2-LABEL: load_i8_v16i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb (%rdi), %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_i8_v16i8_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastb (%rdi), %xmm0
+; AVX512-NEXT: retq
%x = load i8, i8* %p
%ins = insertelement <16 x i8> undef, i8 %x, i32 %y
ret <16 x i8> %ins
}
-define <8 x i16> @load_i16_v8i16(i16* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i16_v8i16:
+define <8 x i16> @load_i16_v8i16_undef(i16* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i16_v8i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
-; AVX1-LABEL: load_i16_v8i16:
+; AVX1-LABEL: load_i16_v8i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzwl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: retq
;
-; AVX2-LABEL: load_i16_v8i16:
+; AVX2-LABEL: load_i16_v8i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw (%rdi), %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_i16_v8i16_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastw (%rdi), %xmm0
+; AVX512-NEXT: retq
%x = load i16, i16* %p
%ins = insertelement <8 x i16> undef, i16 %x, i32 %y
ret <8 x i16> %ins
}
-define <4 x i32> @load_i32_v4i32(i32* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i32_v4i32:
+define <4 x i32> @load_i32_v4i32_undef(i32* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i32_v4i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
-; AVX-LABEL: load_i32_v4i32:
+; AVX-LABEL: load_i32_v4i32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %xmm0
; AVX-NEXT: retq
%x = load i32, i32* %p
%ins = insertelement <4 x i32> undef, i32 %x, i32 %y
ret <4 x i32> %ins
}
-define <2 x i64> @load_i64_v2i64(i64* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i64_v2i64:
+define <2 x i64> @load_i64_v2i64_undef(i64* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i64_v2i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; SSE-NEXT: retq
;
-; AVX-LABEL: load_i64_v2i64:
+; AVX-LABEL: load_i64_v2i64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%x = load i64, i64* %p
%ins = insertelement <2 x i64> undef, i64 %x, i32 %y
ret <2 x i64> %ins
}
-define <4 x float> @load_f32_v4f32(float* %p, i32 %y) nounwind {
-; SSE-LABEL: load_f32_v4f32:
+define <4 x float> @load_f32_v4f32_undef(float* %p, i32 %y) nounwind {
+; SSE-LABEL: load_f32_v4f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT: retq
;
-; AVX-LABEL: load_f32_v4f32:
+; AVX-LABEL: load_f32_v4f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %xmm0
; AVX-NEXT: retq
%x = load float, float* %p
%ins = insertelement <4 x float> undef, float %x, i32 %y
ret <4 x float> %ins
}
-define <2 x double> @load_f64_v2f64(double* %p, i32 %y) nounwind {
-; SSE-LABEL: load_f64_v2f64:
-; SSE: # %bb.0:
-; SSE-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
-; SSE-NEXT: retq
+define <2 x double> @load_f64_v2f64_undef(double* %p, i32 %y) nounwind {
+; SSE2-LABEL: load_f64_v2f64_undef:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE2-NEXT: retq
;
-; AVX-LABEL: load_f64_v2f64:
+; SSE41-LABEL: load_f64_v2f64_undef:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movddup {{.*#+}} xmm0 = mem[0,0]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: load_f64_v2f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX-NEXT: retq
%x = load double, double* %p
%ins = insertelement <2 x double> undef, double %x, i32 %y
ret <2 x double> %ins
}
-define <32 x i8> @arg_i8_v32i8(i8 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i8_v32i8:
+define <32 x i8> @arg_i8_v32i8_undef(i8 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i8_v32i8_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $31, %esi
; SSE-NEXT: movb %dil, -40(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i8_v32i8:
+; AVX1-LABEL: arg_i8_v32i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i8_v32i8:
+; AVX2-LABEL: arg_i8_v32i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i8_v32i8_undef:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovd %edi, %xmm0
+; AVX512F-NEXT: vpbroadcastb %xmm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i8_v32i8_undef:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastb %edi, %ymm0
+; AVX512BW-NEXT: retq
%ins = insertelement <32 x i8> undef, i8 %x, i32 %y
ret <32 x i8> %ins
}
-define <16 x i16> @arg_i16_v16i16(i16 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i16_v16i16:
+define <16 x i16> @arg_i16_v16i16_undef(i16 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i16_v16i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movw %di, -40(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i16_v16i16:
+; AVX1-LABEL: arg_i16_v16i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i16_v16i16:
+; AVX2-LABEL: arg_i16_v16i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastw %xmm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: arg_i16_v16i16_undef:
+; AVX512F: # %bb.0:
+; AVX512F-NEXT: vmovd %edi, %xmm0
+; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: arg_i16_v16i16_undef:
+; AVX512BW: # %bb.0:
+; AVX512BW-NEXT: vpbroadcastw %edi, %ymm0
+; AVX512BW-NEXT: retq
%ins = insertelement <16 x i16> undef, i16 %x, i32 %y
ret <16 x i16> %ins
}
-define <8 x i32> @arg_i32_v8i32(i32 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i32_v8i32:
+define <8 x i32> @arg_i32_v8i32_undef(i32 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i32_v8i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movl %edi, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i32_v8i32:
+; AVX1-LABEL: arg_i32_v8i32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i32_v8i32:
+; AVX2-LABEL: arg_i32_v8i32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
; AVX2-NEXT: vpbroadcastd %xmm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: arg_i32_v8i32_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastd %edi, %ymm0
+; AVX512-NEXT: retq
%ins = insertelement <8 x i32> undef, i32 %x, i32 %y
ret <8 x i32> %ins
}
-define <4 x i64> @arg_i64_v4i64(i64 %x, i32 %y) nounwind {
-; SSE-LABEL: arg_i64_v4i64:
+define <4 x i64> @arg_i64_v4i64_undef(i64 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i64_v4i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movq %rdi, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_i64_v4i64:
+; AVX1-LABEL: arg_i64_v4i64_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_i64_v4i64:
+; AVX2-LABEL: arg_i64_v4i64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: arg_i64_v4i64_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastq %rdi, %ymm0
+; AVX512-NEXT: retq
%ins = insertelement <4 x i64> undef, i64 %x, i32 %y
ret <4 x i64> %ins
}
-define <8 x float> @arg_f32_v8f32(float %x, i32 %y) nounwind {
-; SSE-LABEL: arg_f32_v8f32:
+define <8 x float> @arg_f32_v8f32_undef(float %x, i32 %y) nounwind {
+; SSE-LABEL: arg_f32_v8f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $7, %edi
; SSE-NEXT: movss %xmm0, -40(%rsp,%rdi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_f32_v8f32:
+; AVX1-LABEL: arg_f32_v8f32_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_f32_v8f32:
+; AVX2-LABEL: arg_f32_v8f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastss %xmm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: arg_f32_v8f32_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vbroadcastss %xmm0, %ymm0
+; AVX512-NEXT: retq
%ins = insertelement <8 x float> undef, float %x, i32 %y
ret <8 x float> %ins
}
-define <4 x double> @arg_f64_v4f64(double %x, i32 %y) nounwind {
-; SSE-LABEL: arg_f64_v4f64:
+define <4 x double> @arg_f64_v4f64_undef(double %x, i32 %y) nounwind {
+; SSE-LABEL: arg_f64_v4f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $edi killed $edi def $rdi
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movsd %xmm0, -40(%rsp,%rdi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: arg_f64_v4f64:
+; AVX1-LABEL: arg_f64_v4f64_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: arg_f64_v4f64:
+; AVX2-LABEL: arg_f64_v4f64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: arg_f64_v4f64_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT: retq
%ins = insertelement <4 x double> undef, double %x, i32 %y
ret <4 x double> %ins
}
-define <32 x i8> @load_i8_v32i8(i8* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i8_v32i8:
+define <32 x i8> @load_i8_v32i8_undef(i8* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i8_v32i8_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movb (%rdi), %al
; SSE-NEXT: andl $31, %esi
; SSE-NEXT: movb %al, -40(%rsp,%rsi)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: load_i8_v32i8:
+; AVX1-LABEL: load_i8_v32i8_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzbl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: load_i8_v32i8:
+; AVX2-LABEL: load_i8_v32i8_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastb (%rdi), %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_i8_v32i8_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastb (%rdi), %ymm0
+; AVX512-NEXT: retq
%x = load i8, i8* %p
%ins = insertelement <32 x i8> undef, i8 %x, i32 %y
ret <32 x i8> %ins
}
-define <16 x i16> @load_i16_v16i16(i16* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i16_v16i16:
+define <16 x i16> @load_i16_v16i16_undef(i16* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i16_v16i16_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movzwl (%rdi), %eax
; SSE-NEXT: andl $15, %esi
; SSE-NEXT: movw %ax, -40(%rsp,%rsi,2)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX1-LABEL: load_i16_v16i16:
+; AVX1-LABEL: load_i16_v16i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: movzwl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
-; AVX2-LABEL: load_i16_v16i16:
+; AVX2-LABEL: load_i16_v16i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpbroadcastw (%rdi), %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_i16_v16i16_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpbroadcastw (%rdi), %ymm0
+; AVX512-NEXT: retq
%x = load i16, i16* %p
%ins = insertelement <16 x i16> undef, i16 %x, i32 %y
ret <16 x i16> %ins
}
-define <8 x i32> @load_i32_v8i32(i32* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i32_v8i32:
+define <8 x i32> @load_i32_v8i32_undef(i32* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i32_v8i32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movl (%rdi), %eax
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movl %eax, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: load_i32_v8i32:
+; AVX-LABEL: load_i32_v8i32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %ymm0
; AVX-NEXT: retq
%x = load i32, i32* %p
%ins = insertelement <8 x i32> undef, i32 %x, i32 %y
ret <8 x i32> %ins
}
-define <4 x i64> @load_i64_v4i64(i64* %p, i32 %y) nounwind {
-; SSE-LABEL: load_i64_v4i64:
+define <4 x i64> @load_i64_v4i64_undef(i64* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i64_v4i64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movq (%rdi), %rax
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movq %rax, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: load_i64_v4i64:
+; AVX-LABEL: load_i64_v4i64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
%x = load i64, i64* %p
%ins = insertelement <4 x i64> undef, i64 %x, i32 %y
ret <4 x i64> %ins
}
-define <8 x float> @load_f32_v8f32(float* %p, i32 %y) nounwind {
-; SSE-LABEL: load_f32_v8f32:
+define <8 x float> @load_f32_v8f32_undef(float* %p, i32 %y) nounwind {
+; SSE-LABEL: load_f32_v8f32_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: andl $7, %esi
; SSE-NEXT: movss %xmm0, -40(%rsp,%rsi,4)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: load_f32_v8f32:
+; AVX-LABEL: load_f32_v8f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss (%rdi), %ymm0
; AVX-NEXT: retq
%x = load float, float* %p
%ins = insertelement <8 x float> undef, float %x, i32 %y
ret <8 x float> %ins
}
-define <4 x double> @load_f64_v4f64(double* %p, i32 %y) nounwind {
-; SSE-LABEL: load_f64_v4f64:
+define <4 x double> @load_f64_v4f64_undef(double* %p, i32 %y) nounwind {
+; SSE-LABEL: load_f64_v4f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: # kill: def $esi killed $esi def $rsi
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: andl $3, %esi
; SSE-NEXT: movsd %xmm0, -40(%rsp,%rsi,8)
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
; SSE-NEXT: retq
;
-; AVX-LABEL: load_f64_v4f64:
+; AVX-LABEL: load_f64_v4f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
; AVX-NEXT: retq
%x = load double, double* %p
%ins = insertelement <4 x double> undef, double %x, i32 %y
ret <4 x double> %ins
}
+;
+; Insertion into arg vectors
+;
+
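+; Here the source vector is live, so only lane %y may change. Codegen goes
+; through a stack temporary: spill the vector to an aligned slot, mask the
+; index (andl) to keep the store in bounds, store the scalar into the slot,
+; and reload the vector. The 256-bit AVX cases additionally realign the stack
+; with an rbp frame and 'andq $-32, %rsp'.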
+define <16 x i8> @arg_i8_v16i8(<16 x i8> %v, i8 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i8_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $15, %esi
+; SSE-NEXT: movb %dil, -24(%rsp,%rsi)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i8_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $15, %esi
+; AVX-NEXT: movb %dil, -24(%rsp,%rsi)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %ins = insertelement <16 x i8> %v, i8 %x, i32 %y
+ ret <16 x i8> %ins
+}
+
+define <8 x i16> @arg_i16_v8i16(<8 x i16> %v, i16 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i16_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $7, %esi
+; SSE-NEXT: movw %di, -24(%rsp,%rsi,2)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i16_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $7, %esi
+; AVX-NEXT: movw %di, -24(%rsp,%rsi,2)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %ins = insertelement <8 x i16> %v, i16 %x, i32 %y
+ ret <8 x i16> %ins
+}
+
+define <4 x i32> @arg_i32_v4i32(<4 x i32> %v, i32 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i32_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %esi
+; SSE-NEXT: movl %edi, -24(%rsp,%rsi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i32_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $3, %esi
+; AVX-NEXT: movl %edi, -24(%rsp,%rsi,4)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %ins = insertelement <4 x i32> %v, i32 %x, i32 %y
+ ret <4 x i32> %ins
+}
+
+define <2 x i64> @arg_i64_v2i64(<2 x i64> %v, i64 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i64_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $1, %esi
+; SSE-NEXT: movq %rdi, -24(%rsp,%rsi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i64_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $1, %esi
+; AVX-NEXT: movq %rdi, -24(%rsp,%rsi,8)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %ins = insertelement <2 x i64> %v, i64 %x, i32 %y
+ ret <2 x i64> %ins
+}
+
+define <4 x float> @arg_f32_v4f32(<4 x float> %v, float %x, i32 %y) nounwind {
+; SSE-LABEL: arg_f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %edi
+; SSE-NEXT: movss %xmm1, -24(%rsp,%rdi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $3, %edi
+; AVX-NEXT: vmovss %xmm1, -24(%rsp,%rdi,4)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %ins = insertelement <4 x float> %v, float %x, i32 %y
+ ret <4 x float> %ins
+}
+
+define <2 x double> @arg_f64_v2f64(<2 x double> %v, double %x, i32 %y) nounwind {
+; SSE-LABEL: arg_f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $1, %edi
+; SSE-NEXT: movsd %xmm1, -24(%rsp,%rdi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $1, %edi
+; AVX-NEXT: vmovsd %xmm1, -24(%rsp,%rdi,8)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %ins = insertelement <2 x double> %v, double %x, i32 %y
+ ret <2 x double> %ins
+}
+
+define <16 x i8> @load_i8_v16i8(<16 x i8> %v, i8* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i8_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movb (%rdi), %al
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $15, %esi
+; SSE-NEXT: movb %al, -24(%rsp,%rsi)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i8_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movb (%rdi), %al
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $15, %esi
+; AVX-NEXT: movb %al, -24(%rsp,%rsi)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %x = load i8, i8* %p
+ %ins = insertelement <16 x i8> %v, i8 %x, i32 %y
+ ret <16 x i8> %ins
+}
+
+define <8 x i16> @load_i16_v8i16(<8 x i16> %v, i16* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i16_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movzwl (%rdi), %eax
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $7, %esi
+; SSE-NEXT: movw %ax, -24(%rsp,%rsi,2)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i16_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movzwl (%rdi), %eax
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $7, %esi
+; AVX-NEXT: movw %ax, -24(%rsp,%rsi,2)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %x = load i16, i16* %p
+ %ins = insertelement <8 x i16> %v, i16 %x, i32 %y
+ ret <8 x i16> %ins
+}
+
+define <4 x i32> @load_i32_v4i32(<4 x i32> %v, i32* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i32_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movl (%rdi), %eax
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %esi
+; SSE-NEXT: movl %eax, -24(%rsp,%rsi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i32_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movl (%rdi), %eax
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $3, %esi
+; AVX-NEXT: movl %eax, -24(%rsp,%rsi,4)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %x = load i32, i32* %p
+ %ins = insertelement <4 x i32> %v, i32 %x, i32 %y
+ ret <4 x i32> %ins
+}
+
+define <2 x i64> @load_i64_v2i64(<2 x i64> %v, i64* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i64_v2i64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movq (%rdi), %rax
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $1, %esi
+; SSE-NEXT: movq %rax, -24(%rsp,%rsi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i64_v2i64:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movq (%rdi), %rax
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $1, %esi
+; AVX-NEXT: movq %rax, -24(%rsp,%rsi,8)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %x = load i64, i64* %p
+ %ins = insertelement <2 x i64> %v, i64 %x, i32 %y
+ ret <2 x i64> %ins
+}
+
+define <4 x float> @load_f32_v4f32(<4 x float> %v, float* %p, i32 %y) nounwind {
+; SSE-LABEL: load_f32_v4f32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %esi
+; SSE-NEXT: movss %xmm1, -24(%rsp,%rsi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_f32_v4f32:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $3, %esi
+; AVX-NEXT: vmovss %xmm1, -24(%rsp,%rsi,4)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %x = load float, float* %p
+ %ins = insertelement <4 x float> %v, float %x, i32 %y
+ ret <4 x float> %ins
+}
+
+define <2 x double> @load_f64_v2f64(<2 x double> %v, double* %p, i32 %y) nounwind {
+; SSE-LABEL: load_f64_v2f64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $1, %esi
+; SSE-NEXT: movsd %xmm1, -24(%rsp,%rsi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_f64_v2f64:
+; AVX: # %bb.0:
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $1, %esi
+; AVX-NEXT: vmovsd %xmm1, -24(%rsp,%rsi,8)
+; AVX-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; AVX-NEXT: retq
+ %x = load double, double* %p
+ %ins = insertelement <2 x double> %v, double %x, i32 %y
+ ret <2 x double> %ins
+}
+
+define <32 x i8> @arg_i8_v32i8(<32 x i8> %v, i8 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i8_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $31, %esi
+; SSE-NEXT: movb %dil, -40(%rsp,%rsi)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i8_v32i8:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $31, %esi
+; AVX-NEXT: movb %dil, (%rsp,%rsi)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %ins = insertelement <32 x i8> %v, i8 %x, i32 %y
+ ret <32 x i8> %ins
+}
+
+define <16 x i16> @arg_i16_v16i16(<16 x i16> %v, i16 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i16_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $15, %esi
+; SSE-NEXT: movw %di, -40(%rsp,%rsi,2)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i16_v16i16:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $15, %esi
+; AVX-NEXT: movw %di, (%rsp,%rsi,2)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %ins = insertelement <16 x i16> %v, i16 %x, i32 %y
+ ret <16 x i16> %ins
+}
+
+define <8 x i32> @arg_i32_v8i32(<8 x i32> %v, i32 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i32_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $7, %esi
+; SSE-NEXT: movl %edi, -40(%rsp,%rsi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i32_v8i32:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $7, %esi
+; AVX-NEXT: movl %edi, (%rsp,%rsi,4)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %ins = insertelement <8 x i32> %v, i32 %x, i32 %y
+ ret <8 x i32> %ins
+}
+
+define <4 x i64> @arg_i64_v4i64(<4 x i64> %v, i64 %x, i32 %y) nounwind {
+; SSE-LABEL: arg_i64_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %esi
+; SSE-NEXT: movq %rdi, -40(%rsp,%rsi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_i64_v4i64:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $3, %esi
+; AVX-NEXT: movq %rdi, (%rsp,%rsi,8)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %ins = insertelement <4 x i64> %v, i64 %x, i32 %y
+ ret <4 x i64> %ins
+}
+
+define <8 x float> @arg_f32_v8f32(<8 x float> %v, float %x, i32 %y) nounwind {
+; SSE-LABEL: arg_f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $7, %edi
+; SSE-NEXT: movss %xmm2, -40(%rsp,%rdi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_f32_v8f32:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $7, %edi
+; AVX-NEXT: vmovss %xmm1, (%rsp,%rdi,4)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %ins = insertelement <8 x float> %v, float %x, i32 %y
+ ret <8 x float> %ins
+}
+
+define <4 x double> @arg_f64_v4f64(<4 x double> %v, double %x, i32 %y) nounwind {
+; SSE-LABEL: arg_f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $edi killed $edi def $rdi
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %edi
+; SSE-NEXT: movsd %xmm2, -40(%rsp,%rdi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: arg_f64_v4f64:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $edi killed $edi def $rdi
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $3, %edi
+; AVX-NEXT: vmovsd %xmm1, (%rsp,%rdi,8)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %ins = insertelement <4 x double> %v, double %x, i32 %y
+ ret <4 x double> %ins
+}
+
+define <32 x i8> @load_i8_v32i8(<32 x i8> %v, i8* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i8_v32i8:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movb (%rdi), %al
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $31, %esi
+; SSE-NEXT: movb %al, -40(%rsp,%rsi)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i8_v32i8:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movb (%rdi), %al
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $31, %esi
+; AVX-NEXT: movb %al, (%rsp,%rsi)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %x = load i8, i8* %p
+ %ins = insertelement <32 x i8> %v, i8 %x, i32 %y
+ ret <32 x i8> %ins
+}
+
+define <16 x i16> @load_i16_v16i16(<16 x i16> %v, i16* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i16_v16i16:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movzwl (%rdi), %eax
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $15, %esi
+; SSE-NEXT: movw %ax, -40(%rsp,%rsi,2)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i16_v16i16:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movzwl (%rdi), %eax
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $15, %esi
+; AVX-NEXT: movw %ax, (%rsp,%rsi,2)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %x = load i16, i16* %p
+ %ins = insertelement <16 x i16> %v, i16 %x, i32 %y
+ ret <16 x i16> %ins
+}
+
+define <8 x i32> @load_i32_v8i32(<8 x i32> %v, i32* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i32_v8i32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movl (%rdi), %eax
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $7, %esi
+; SSE-NEXT: movl %eax, -40(%rsp,%rsi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i32_v8i32:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movl (%rdi), %eax
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $7, %esi
+; AVX-NEXT: movl %eax, (%rsp,%rsi,4)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %x = load i32, i32* %p
+ %ins = insertelement <8 x i32> %v, i32 %x, i32 %y
+ ret <8 x i32> %ins
+}
+
+define <4 x i64> @load_i64_v4i64(<4 x i64> %v, i64* %p, i32 %y) nounwind {
+; SSE-LABEL: load_i64_v4i64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movq (%rdi), %rax
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %esi
+; SSE-NEXT: movq %rax, -40(%rsp,%rsi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_i64_v4i64:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: movq (%rdi), %rax
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $3, %esi
+; AVX-NEXT: movq %rax, (%rsp,%rsi,8)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %x = load i64, i64* %p
+ %ins = insertelement <4 x i64> %v, i64 %x, i32 %y
+ ret <4 x i64> %ins
+}
+
+define <8 x float> @load_f32_v8f32(<8 x float> %v, float* %p, i32 %y) nounwind {
+; SSE-LABEL: load_f32_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $7, %esi
+; SSE-NEXT: movss %xmm2, -40(%rsp,%rsi,4)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_f32_v8f32:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $7, %esi
+; AVX-NEXT: vmovss %xmm1, (%rsp,%rsi,4)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %x = load float, float* %p
+ %ins = insertelement <8 x float> %v, float %x, i32 %y
+ ret <8 x float> %ins
+}
+
+define <4 x double> @load_f64_v4f64(<4 x double> %v, double* %p, i32 %y) nounwind {
+; SSE-LABEL: load_f64_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: # kill: def $esi killed $esi def $rsi
+; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSE-NEXT: andl $3, %esi
+; SSE-NEXT: movsd %xmm2, -40(%rsp,%rsi,8)
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: load_f64_v4f64:
+; AVX: # %bb.0:
+; AVX-NEXT: pushq %rbp
+; AVX-NEXT: movq %rsp, %rbp
+; AVX-NEXT: andq $-32, %rsp
+; AVX-NEXT: subq $64, %rsp
+; AVX-NEXT: # kill: def $esi killed $esi def $rsi
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovaps %ymm0, (%rsp)
+; AVX-NEXT: andl $3, %esi
+; AVX-NEXT: vmovsd %xmm1, (%rsp,%rsi,8)
+; AVX-NEXT: vmovaps (%rsp), %ymm0
+; AVX-NEXT: movq %rbp, %rsp
+; AVX-NEXT: popq %rbp
+; AVX-NEXT: retq
+ %x = load double, double* %p
+ %ins = insertelement <4 x double> %v, double %x, i32 %y
+ ret <4 x double> %ins
+}
+
; Don't die trying to insert to an invalid index.
define i32 @PR44139(<16 x i64>* %p) {