; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=ALL,SSE,SSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefixes=ALL,SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.2 | FileCheck %s --check-prefixes=ALL,SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,SCALAR
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2,SSE2-ONLY
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=ALL,SSE,SSE2,SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefixes=ALL,SSE,SSSE3,SSSE3-ONLY
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSSE3,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.2 | FileCheck %s --check-prefixes=ALL,SSE,SSSE3,SSE42
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW
+; 16-bit "vector": load one i8, bitwise-NOT it, store it to byte offsets 0 and 1.
+; Checks are autogenerated; all RUN configs share the ALL prefix (same scalar code).
+define void @vec16_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec16_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: movzbl (%rdi), %eax
+; ALL-NEXT: notb %al
+; ALL-NEXT: movb %al, (%rsi)
+; ALL-NEXT: movb %al, 1(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i8, ptr %in.elt.ptr, align 64
+ %in.elt = xor i8 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i8, ptr %out.vec.ptr, i64 0
+ store i8 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i8, ptr %out.vec.ptr, i64 1
+ store i8 %in.elt, ptr %out.elt1.ptr, align 1
+ ret void
+}
+
+; 32-bit "vector": load one i8, NOT it, splat-store it to byte offsets 0..3.
+define void @vec32_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec32_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: movzbl (%rdi), %eax
+; ALL-NEXT: notb %al
+; ALL-NEXT: movb %al, (%rsi)
+; ALL-NEXT: movb %al, 1(%rsi)
+; ALL-NEXT: movb %al, 2(%rsi)
+; ALL-NEXT: movb %al, 3(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i8, ptr %in.elt.ptr, align 64
+ %in.elt = xor i8 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i8, ptr %out.vec.ptr, i64 0
+ store i8 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i8, ptr %out.vec.ptr, i64 1
+ store i8 %in.elt, ptr %out.elt1.ptr, align 1
+ %out.elt2.ptr = getelementptr i8, ptr %out.vec.ptr, i64 2
+ store i8 %in.elt, ptr %out.elt2.ptr, align 2
+ %out.elt3.ptr = getelementptr i8, ptr %out.vec.ptr, i64 3
+ store i8 %in.elt, ptr %out.elt3.ptr, align 1
+ ret void
+}
+
+; 32-bit "vector" of i16: the generated code uses a 32-bit load+not (movl/notl)
+; feeding two 16-bit stores at offsets 0 and 2.
+define void @vec32_i16(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec32_i16:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movw %ax, (%rsi)
+; ALL-NEXT: movw %ax, 2(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i16, ptr %in.elt.ptr, align 64
+ %in.elt = xor i16 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i16, ptr %out.vec.ptr, i64 0
+ store i16 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i16, ptr %out.vec.ptr, i64 1
+ store i16 %in.elt, ptr %out.elt1.ptr, align 2
+ ret void
+}
+
+; 64-bit "vector": load one i8, NOT it, splat-store it to byte offsets 0..7.
+define void @vec64_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec64_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: movzbl (%rdi), %eax
+; ALL-NEXT: notb %al
+; ALL-NEXT: movb %al, (%rsi)
+; ALL-NEXT: movb %al, 1(%rsi)
+; ALL-NEXT: movb %al, 2(%rsi)
+; ALL-NEXT: movb %al, 3(%rsi)
+; ALL-NEXT: movb %al, 4(%rsi)
+; ALL-NEXT: movb %al, 5(%rsi)
+; ALL-NEXT: movb %al, 6(%rsi)
+; ALL-NEXT: movb %al, 7(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i8, ptr %in.elt.ptr, align 64
+ %in.elt = xor i8 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i8, ptr %out.vec.ptr, i64 0
+ store i8 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i8, ptr %out.vec.ptr, i64 1
+ store i8 %in.elt, ptr %out.elt1.ptr, align 1
+ %out.elt2.ptr = getelementptr i8, ptr %out.vec.ptr, i64 2
+ store i8 %in.elt, ptr %out.elt2.ptr, align 2
+ %out.elt3.ptr = getelementptr i8, ptr %out.vec.ptr, i64 3
+ store i8 %in.elt, ptr %out.elt3.ptr, align 1
+ %out.elt4.ptr = getelementptr i8, ptr %out.vec.ptr, i64 4
+ store i8 %in.elt, ptr %out.elt4.ptr, align 4
+ %out.elt5.ptr = getelementptr i8, ptr %out.vec.ptr, i64 5
+ store i8 %in.elt, ptr %out.elt5.ptr, align 1
+ %out.elt6.ptr = getelementptr i8, ptr %out.vec.ptr, i64 6
+ store i8 %in.elt, ptr %out.elt6.ptr, align 2
+ %out.elt7.ptr = getelementptr i8, ptr %out.vec.ptr, i64 7
+ store i8 %in.elt, ptr %out.elt7.ptr, align 1
+ ret void
+}
+
+; 64-bit "vector" of i16: 32-bit load+not feeding four 16-bit stores (offsets 0..6).
+define void @vec64_i16(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec64_i16:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movw %ax, (%rsi)
+; ALL-NEXT: movw %ax, 2(%rsi)
+; ALL-NEXT: movw %ax, 4(%rsi)
+; ALL-NEXT: movw %ax, 6(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i16, ptr %in.elt.ptr, align 64
+ %in.elt = xor i16 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i16, ptr %out.vec.ptr, i64 0
+ store i16 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i16, ptr %out.vec.ptr, i64 1
+ store i16 %in.elt, ptr %out.elt1.ptr, align 2
+ %out.elt2.ptr = getelementptr i16, ptr %out.vec.ptr, i64 2
+ store i16 %in.elt, ptr %out.elt2.ptr, align 4
+ %out.elt3.ptr = getelementptr i16, ptr %out.vec.ptr, i64 3
+ store i16 %in.elt, ptr %out.elt3.ptr, align 2
+ ret void
+}
+
+; 64-bit "vector" of i32: load one i32, NOT it, store it to offsets 0 and 4.
+define void @vec64_i32(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec64_i32:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt = xor i32 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i32, ptr %out.vec.ptr, i64 0
+ store i32 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i32, ptr %out.vec.ptr, i64 1
+ store i32 %in.elt, ptr %out.elt1.ptr, align 4
+ ret void
+}
+
+; Float variant of vec64_i32: the NOT happens in the integer domain before the
+; bitcast to float, so codegen stays entirely in GPRs (movl/notl/movl).
+define void @vec64_float(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec64_float:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt.int = xor i32 %in.elt.not, -1
+ %in.elt = bitcast i32 %in.elt.int to float
+ %out.elt0.ptr = getelementptr float, ptr %out.vec.ptr, i64 0
+ store float %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr float, ptr %out.vec.ptr, i64 1
+ store float %in.elt, ptr %out.elt1.ptr, align 4
+ ret void
+}
+
+; 128-bit "vector": load one i8, NOT it, splat-store it to byte offsets 0..15.
+define void @vec128_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec128_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: movzbl (%rdi), %eax
+; ALL-NEXT: notb %al
+; ALL-NEXT: movb %al, (%rsi)
+; ALL-NEXT: movb %al, 1(%rsi)
+; ALL-NEXT: movb %al, 2(%rsi)
+; ALL-NEXT: movb %al, 3(%rsi)
+; ALL-NEXT: movb %al, 4(%rsi)
+; ALL-NEXT: movb %al, 5(%rsi)
+; ALL-NEXT: movb %al, 6(%rsi)
+; ALL-NEXT: movb %al, 7(%rsi)
+; ALL-NEXT: movb %al, 8(%rsi)
+; ALL-NEXT: movb %al, 9(%rsi)
+; ALL-NEXT: movb %al, 10(%rsi)
+; ALL-NEXT: movb %al, 11(%rsi)
+; ALL-NEXT: movb %al, 12(%rsi)
+; ALL-NEXT: movb %al, 13(%rsi)
+; ALL-NEXT: movb %al, 14(%rsi)
+; ALL-NEXT: movb %al, 15(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i8, ptr %in.elt.ptr, align 64
+ %in.elt = xor i8 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i8, ptr %out.vec.ptr, i64 0
+ store i8 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i8, ptr %out.vec.ptr, i64 1
+ store i8 %in.elt, ptr %out.elt1.ptr, align 1
+ %out.elt2.ptr = getelementptr i8, ptr %out.vec.ptr, i64 2
+ store i8 %in.elt, ptr %out.elt2.ptr, align 2
+ %out.elt3.ptr = getelementptr i8, ptr %out.vec.ptr, i64 3
+ store i8 %in.elt, ptr %out.elt3.ptr, align 1
+ %out.elt4.ptr = getelementptr i8, ptr %out.vec.ptr, i64 4
+ store i8 %in.elt, ptr %out.elt4.ptr, align 4
+ %out.elt5.ptr = getelementptr i8, ptr %out.vec.ptr, i64 5
+ store i8 %in.elt, ptr %out.elt5.ptr, align 1
+ %out.elt6.ptr = getelementptr i8, ptr %out.vec.ptr, i64 6
+ store i8 %in.elt, ptr %out.elt6.ptr, align 2
+ %out.elt7.ptr = getelementptr i8, ptr %out.vec.ptr, i64 7
+ store i8 %in.elt, ptr %out.elt7.ptr, align 1
+ %out.elt8.ptr = getelementptr i8, ptr %out.vec.ptr, i64 8
+ store i8 %in.elt, ptr %out.elt8.ptr, align 8
+ %out.elt9.ptr = getelementptr i8, ptr %out.vec.ptr, i64 9
+ store i8 %in.elt, ptr %out.elt9.ptr, align 1
+ %out.elt10.ptr = getelementptr i8, ptr %out.vec.ptr, i64 10
+ store i8 %in.elt, ptr %out.elt10.ptr, align 2
+ %out.elt11.ptr = getelementptr i8, ptr %out.vec.ptr, i64 11
+ store i8 %in.elt, ptr %out.elt11.ptr, align 1
+ %out.elt12.ptr = getelementptr i8, ptr %out.vec.ptr, i64 12
+ store i8 %in.elt, ptr %out.elt12.ptr, align 4
+ %out.elt13.ptr = getelementptr i8, ptr %out.vec.ptr, i64 13
+ store i8 %in.elt, ptr %out.elt13.ptr, align 1
+ %out.elt14.ptr = getelementptr i8, ptr %out.vec.ptr, i64 14
+ store i8 %in.elt, ptr %out.elt14.ptr, align 2
+ %out.elt15.ptr = getelementptr i8, ptr %out.vec.ptr, i64 15
+ store i8 %in.elt, ptr %out.elt15.ptr, align 1
+ ret void
+}
+
+; 128-bit "vector" of i16: 32-bit load+not feeding eight 16-bit stores (offsets 0..14).
+define void @vec128_i16(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec128_i16:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movw %ax, (%rsi)
+; ALL-NEXT: movw %ax, 2(%rsi)
+; ALL-NEXT: movw %ax, 4(%rsi)
+; ALL-NEXT: movw %ax, 6(%rsi)
+; ALL-NEXT: movw %ax, 8(%rsi)
+; ALL-NEXT: movw %ax, 10(%rsi)
+; ALL-NEXT: movw %ax, 12(%rsi)
+; ALL-NEXT: movw %ax, 14(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i16, ptr %in.elt.ptr, align 64
+ %in.elt = xor i16 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i16, ptr %out.vec.ptr, i64 0
+ store i16 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i16, ptr %out.vec.ptr, i64 1
+ store i16 %in.elt, ptr %out.elt1.ptr, align 2
+ %out.elt2.ptr = getelementptr i16, ptr %out.vec.ptr, i64 2
+ store i16 %in.elt, ptr %out.elt2.ptr, align 4
+ %out.elt3.ptr = getelementptr i16, ptr %out.vec.ptr, i64 3
+ store i16 %in.elt, ptr %out.elt3.ptr, align 2
+ %out.elt4.ptr = getelementptr i16, ptr %out.vec.ptr, i64 4
+ store i16 %in.elt, ptr %out.elt4.ptr, align 8
+ %out.elt5.ptr = getelementptr i16, ptr %out.vec.ptr, i64 5
+ store i16 %in.elt, ptr %out.elt5.ptr, align 2
+ %out.elt6.ptr = getelementptr i16, ptr %out.vec.ptr, i64 6
+ store i16 %in.elt, ptr %out.elt6.ptr, align 4
+ %out.elt7.ptr = getelementptr i16, ptr %out.vec.ptr, i64 7
+ store i16 %in.elt, ptr %out.elt7.ptr, align 2
+ ret void
+}
+
+; 128-bit "vector" of i32: load one i32, NOT it, store it to offsets 0,4,8,12.
+define void @vec128_i32(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec128_i32:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: movl %eax, 8(%rsi)
+; ALL-NEXT: movl %eax, 12(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt = xor i32 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i32, ptr %out.vec.ptr, i64 0
+ store i32 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i32, ptr %out.vec.ptr, i64 1
+ store i32 %in.elt, ptr %out.elt1.ptr, align 4
+ %out.elt2.ptr = getelementptr i32, ptr %out.vec.ptr, i64 2
+ store i32 %in.elt, ptr %out.elt2.ptr, align 8
+ %out.elt3.ptr = getelementptr i32, ptr %out.vec.ptr, i64 3
+ store i32 %in.elt, ptr %out.elt3.ptr, align 4
+ ret void
+}
+
+; Float variant of vec128_i32: integer-domain NOT precedes the float bitcast, so
+; all stores remain GPR movl's.
+define void @vec128_float(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec128_float:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: movl %eax, 8(%rsi)
+; ALL-NEXT: movl %eax, 12(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt.int = xor i32 %in.elt.not, -1
+ %in.elt = bitcast i32 %in.elt.int to float
+ %out.elt0.ptr = getelementptr float, ptr %out.vec.ptr, i64 0
+ store float %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr float, ptr %out.vec.ptr, i64 1
+ store float %in.elt, ptr %out.elt1.ptr, align 4
+ %out.elt2.ptr = getelementptr float, ptr %out.vec.ptr, i64 2
+ store float %in.elt, ptr %out.elt2.ptr, align 8
+ %out.elt3.ptr = getelementptr float, ptr %out.vec.ptr, i64 3
+ store float %in.elt, ptr %out.elt3.ptr, align 4
+ ret void
+}
+
+; 128-bit "vector" of i64: load one i64, NOT it, store it to offsets 0 and 8.
+define void @vec128_i64(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec128_i64:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rax, 8(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+ %in.elt = xor i64 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i64, ptr %out.vec.ptr, i64 0
+ store i64 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i64, ptr %out.vec.ptr, i64 1
+ store i64 %in.elt, ptr %out.elt1.ptr, align 8
+ ret void
+}
+
+; Double variant of vec128_i64: integer-domain NOT precedes the double bitcast,
+; so codegen is identical GPR movq's.
+define void @vec128_double(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec128_double:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rax, 8(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+ %in.elt.int = xor i64 %in.elt.not, -1
+ %in.elt = bitcast i64 %in.elt.int to double
+ %out.elt0.ptr = getelementptr double, ptr %out.vec.ptr, i64 0
+ store double %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr double, ptr %out.vec.ptr, i64 1
+ store double %in.elt, ptr %out.elt1.ptr, align 8
+ ret void
+}
+
+; 256-bit "vector": load one i8, NOT it, splat-store it to byte offsets 0..31.
+define void @vec256_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec256_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: movzbl (%rdi), %eax
+; ALL-NEXT: notb %al
+; ALL-NEXT: movb %al, (%rsi)
+; ALL-NEXT: movb %al, 1(%rsi)
+; ALL-NEXT: movb %al, 2(%rsi)
+; ALL-NEXT: movb %al, 3(%rsi)
+; ALL-NEXT: movb %al, 4(%rsi)
+; ALL-NEXT: movb %al, 5(%rsi)
+; ALL-NEXT: movb %al, 6(%rsi)
+; ALL-NEXT: movb %al, 7(%rsi)
+; ALL-NEXT: movb %al, 8(%rsi)
+; ALL-NEXT: movb %al, 9(%rsi)
+; ALL-NEXT: movb %al, 10(%rsi)
+; ALL-NEXT: movb %al, 11(%rsi)
+; ALL-NEXT: movb %al, 12(%rsi)
+; ALL-NEXT: movb %al, 13(%rsi)
+; ALL-NEXT: movb %al, 14(%rsi)
+; ALL-NEXT: movb %al, 15(%rsi)
+; ALL-NEXT: movb %al, 16(%rsi)
+; ALL-NEXT: movb %al, 17(%rsi)
+; ALL-NEXT: movb %al, 18(%rsi)
+; ALL-NEXT: movb %al, 19(%rsi)
+; ALL-NEXT: movb %al, 20(%rsi)
+; ALL-NEXT: movb %al, 21(%rsi)
+; ALL-NEXT: movb %al, 22(%rsi)
+; ALL-NEXT: movb %al, 23(%rsi)
+; ALL-NEXT: movb %al, 24(%rsi)
+; ALL-NEXT: movb %al, 25(%rsi)
+; ALL-NEXT: movb %al, 26(%rsi)
+; ALL-NEXT: movb %al, 27(%rsi)
+; ALL-NEXT: movb %al, 28(%rsi)
+; ALL-NEXT: movb %al, 29(%rsi)
+; ALL-NEXT: movb %al, 30(%rsi)
+; ALL-NEXT: movb %al, 31(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i8, ptr %in.elt.ptr, align 64
+ %in.elt = xor i8 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i8, ptr %out.vec.ptr, i64 0
+ store i8 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i8, ptr %out.vec.ptr, i64 1
+ store i8 %in.elt, ptr %out.elt1.ptr, align 1
+ %out.elt2.ptr = getelementptr i8, ptr %out.vec.ptr, i64 2
+ store i8 %in.elt, ptr %out.elt2.ptr, align 2
+ %out.elt3.ptr = getelementptr i8, ptr %out.vec.ptr, i64 3
+ store i8 %in.elt, ptr %out.elt3.ptr, align 1
+ %out.elt4.ptr = getelementptr i8, ptr %out.vec.ptr, i64 4
+ store i8 %in.elt, ptr %out.elt4.ptr, align 4
+ %out.elt5.ptr = getelementptr i8, ptr %out.vec.ptr, i64 5
+ store i8 %in.elt, ptr %out.elt5.ptr, align 1
+ %out.elt6.ptr = getelementptr i8, ptr %out.vec.ptr, i64 6
+ store i8 %in.elt, ptr %out.elt6.ptr, align 2
+ %out.elt7.ptr = getelementptr i8, ptr %out.vec.ptr, i64 7
+ store i8 %in.elt, ptr %out.elt7.ptr, align 1
+ %out.elt8.ptr = getelementptr i8, ptr %out.vec.ptr, i64 8
+ store i8 %in.elt, ptr %out.elt8.ptr, align 8
+ %out.elt9.ptr = getelementptr i8, ptr %out.vec.ptr, i64 9
+ store i8 %in.elt, ptr %out.elt9.ptr, align 1
+ %out.elt10.ptr = getelementptr i8, ptr %out.vec.ptr, i64 10
+ store i8 %in.elt, ptr %out.elt10.ptr, align 2
+ %out.elt11.ptr = getelementptr i8, ptr %out.vec.ptr, i64 11
+ store i8 %in.elt, ptr %out.elt11.ptr, align 1
+ %out.elt12.ptr = getelementptr i8, ptr %out.vec.ptr, i64 12
+ store i8 %in.elt, ptr %out.elt12.ptr, align 4
+ %out.elt13.ptr = getelementptr i8, ptr %out.vec.ptr, i64 13
+ store i8 %in.elt, ptr %out.elt13.ptr, align 1
+ %out.elt14.ptr = getelementptr i8, ptr %out.vec.ptr, i64 14
+ store i8 %in.elt, ptr %out.elt14.ptr, align 2
+ %out.elt15.ptr = getelementptr i8, ptr %out.vec.ptr, i64 15
+ store i8 %in.elt, ptr %out.elt15.ptr, align 1
+ %out.elt16.ptr = getelementptr i8, ptr %out.vec.ptr, i64 16
+ store i8 %in.elt, ptr %out.elt16.ptr, align 16
+ %out.elt17.ptr = getelementptr i8, ptr %out.vec.ptr, i64 17
+ store i8 %in.elt, ptr %out.elt17.ptr, align 1
+ %out.elt18.ptr = getelementptr i8, ptr %out.vec.ptr, i64 18
+ store i8 %in.elt, ptr %out.elt18.ptr, align 2
+ %out.elt19.ptr = getelementptr i8, ptr %out.vec.ptr, i64 19
+ store i8 %in.elt, ptr %out.elt19.ptr, align 1
+ %out.elt20.ptr = getelementptr i8, ptr %out.vec.ptr, i64 20
+ store i8 %in.elt, ptr %out.elt20.ptr, align 4
+ %out.elt21.ptr = getelementptr i8, ptr %out.vec.ptr, i64 21
+ store i8 %in.elt, ptr %out.elt21.ptr, align 1
+ %out.elt22.ptr = getelementptr i8, ptr %out.vec.ptr, i64 22
+ store i8 %in.elt, ptr %out.elt22.ptr, align 2
+ %out.elt23.ptr = getelementptr i8, ptr %out.vec.ptr, i64 23
+ store i8 %in.elt, ptr %out.elt23.ptr, align 1
+ %out.elt24.ptr = getelementptr i8, ptr %out.vec.ptr, i64 24
+ store i8 %in.elt, ptr %out.elt24.ptr, align 8
+ %out.elt25.ptr = getelementptr i8, ptr %out.vec.ptr, i64 25
+ store i8 %in.elt, ptr %out.elt25.ptr, align 1
+ %out.elt26.ptr = getelementptr i8, ptr %out.vec.ptr, i64 26
+ store i8 %in.elt, ptr %out.elt26.ptr, align 2
+ %out.elt27.ptr = getelementptr i8, ptr %out.vec.ptr, i64 27
+ store i8 %in.elt, ptr %out.elt27.ptr, align 1
+ %out.elt28.ptr = getelementptr i8, ptr %out.vec.ptr, i64 28
+ store i8 %in.elt, ptr %out.elt28.ptr, align 4
+ %out.elt29.ptr = getelementptr i8, ptr %out.vec.ptr, i64 29
+ store i8 %in.elt, ptr %out.elt29.ptr, align 1
+ %out.elt30.ptr = getelementptr i8, ptr %out.vec.ptr, i64 30
+ store i8 %in.elt, ptr %out.elt30.ptr, align 2
+ %out.elt31.ptr = getelementptr i8, ptr %out.vec.ptr, i64 31
+ store i8 %in.elt, ptr %out.elt31.ptr, align 1
+ ret void
+}
+
+; 256-bit "vector" of i16: 32-bit load+not feeding sixteen 16-bit stores (offsets 0..30).
+define void @vec256_i16(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec256_i16:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movw %ax, (%rsi)
+; ALL-NEXT: movw %ax, 2(%rsi)
+; ALL-NEXT: movw %ax, 4(%rsi)
+; ALL-NEXT: movw %ax, 6(%rsi)
+; ALL-NEXT: movw %ax, 8(%rsi)
+; ALL-NEXT: movw %ax, 10(%rsi)
+; ALL-NEXT: movw %ax, 12(%rsi)
+; ALL-NEXT: movw %ax, 14(%rsi)
+; ALL-NEXT: movw %ax, 16(%rsi)
+; ALL-NEXT: movw %ax, 18(%rsi)
+; ALL-NEXT: movw %ax, 20(%rsi)
+; ALL-NEXT: movw %ax, 22(%rsi)
+; ALL-NEXT: movw %ax, 24(%rsi)
+; ALL-NEXT: movw %ax, 26(%rsi)
+; ALL-NEXT: movw %ax, 28(%rsi)
+; ALL-NEXT: movw %ax, 30(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i16, ptr %in.elt.ptr, align 64
+ %in.elt = xor i16 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i16, ptr %out.vec.ptr, i64 0
+ store i16 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i16, ptr %out.vec.ptr, i64 1
+ store i16 %in.elt, ptr %out.elt1.ptr, align 2
+ %out.elt2.ptr = getelementptr i16, ptr %out.vec.ptr, i64 2
+ store i16 %in.elt, ptr %out.elt2.ptr, align 4
+ %out.elt3.ptr = getelementptr i16, ptr %out.vec.ptr, i64 3
+ store i16 %in.elt, ptr %out.elt3.ptr, align 2
+ %out.elt4.ptr = getelementptr i16, ptr %out.vec.ptr, i64 4
+ store i16 %in.elt, ptr %out.elt4.ptr, align 8
+ %out.elt5.ptr = getelementptr i16, ptr %out.vec.ptr, i64 5
+ store i16 %in.elt, ptr %out.elt5.ptr, align 2
+ %out.elt6.ptr = getelementptr i16, ptr %out.vec.ptr, i64 6
+ store i16 %in.elt, ptr %out.elt6.ptr, align 4
+ %out.elt7.ptr = getelementptr i16, ptr %out.vec.ptr, i64 7
+ store i16 %in.elt, ptr %out.elt7.ptr, align 2
+ %out.elt8.ptr = getelementptr i16, ptr %out.vec.ptr, i64 8
+ store i16 %in.elt, ptr %out.elt8.ptr, align 16
+ %out.elt9.ptr = getelementptr i16, ptr %out.vec.ptr, i64 9
+ store i16 %in.elt, ptr %out.elt9.ptr, align 2
+ %out.elt10.ptr = getelementptr i16, ptr %out.vec.ptr, i64 10
+ store i16 %in.elt, ptr %out.elt10.ptr, align 4
+ %out.elt11.ptr = getelementptr i16, ptr %out.vec.ptr, i64 11
+ store i16 %in.elt, ptr %out.elt11.ptr, align 2
+ %out.elt12.ptr = getelementptr i16, ptr %out.vec.ptr, i64 12
+ store i16 %in.elt, ptr %out.elt12.ptr, align 8
+ %out.elt13.ptr = getelementptr i16, ptr %out.vec.ptr, i64 13
+ store i16 %in.elt, ptr %out.elt13.ptr, align 2
+ %out.elt14.ptr = getelementptr i16, ptr %out.vec.ptr, i64 14
+ store i16 %in.elt, ptr %out.elt14.ptr, align 4
+ %out.elt15.ptr = getelementptr i16, ptr %out.vec.ptr, i64 15
+ store i16 %in.elt, ptr %out.elt15.ptr, align 2
+ ret void
+}
+
+; 256-bit "vector" of i32: load one i32, NOT it, store it to offsets 0,4,...,28.
+define void @vec256_i32(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec256_i32:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: movl %eax, 8(%rsi)
+; ALL-NEXT: movl %eax, 12(%rsi)
+; ALL-NEXT: movl %eax, 16(%rsi)
+; ALL-NEXT: movl %eax, 20(%rsi)
+; ALL-NEXT: movl %eax, 24(%rsi)
+; ALL-NEXT: movl %eax, 28(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt = xor i32 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i32, ptr %out.vec.ptr, i64 0
+ store i32 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i32, ptr %out.vec.ptr, i64 1
+ store i32 %in.elt, ptr %out.elt1.ptr, align 4
+ %out.elt2.ptr = getelementptr i32, ptr %out.vec.ptr, i64 2
+ store i32 %in.elt, ptr %out.elt2.ptr, align 8
+ %out.elt3.ptr = getelementptr i32, ptr %out.vec.ptr, i64 3
+ store i32 %in.elt, ptr %out.elt3.ptr, align 4
+ %out.elt4.ptr = getelementptr i32, ptr %out.vec.ptr, i64 4
+ store i32 %in.elt, ptr %out.elt4.ptr, align 16
+ %out.elt5.ptr = getelementptr i32, ptr %out.vec.ptr, i64 5
+ store i32 %in.elt, ptr %out.elt5.ptr, align 4
+ %out.elt6.ptr = getelementptr i32, ptr %out.vec.ptr, i64 6
+ store i32 %in.elt, ptr %out.elt6.ptr, align 8
+ %out.elt7.ptr = getelementptr i32, ptr %out.vec.ptr, i64 7
+ store i32 %in.elt, ptr %out.elt7.ptr, align 4
+ ret void
+}
+
+; Float variant of vec256_i32: integer-domain NOT precedes the float bitcast,
+; so codegen stays in GPRs.
+define void @vec256_float(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec256_float:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: movl %eax, 8(%rsi)
+; ALL-NEXT: movl %eax, 12(%rsi)
+; ALL-NEXT: movl %eax, 16(%rsi)
+; ALL-NEXT: movl %eax, 20(%rsi)
+; ALL-NEXT: movl %eax, 24(%rsi)
+; ALL-NEXT: movl %eax, 28(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt.int = xor i32 %in.elt.not, -1
+ %in.elt = bitcast i32 %in.elt.int to float
+ %out.elt0.ptr = getelementptr float, ptr %out.vec.ptr, i64 0
+ store float %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr float, ptr %out.vec.ptr, i64 1
+ store float %in.elt, ptr %out.elt1.ptr, align 4
+ %out.elt2.ptr = getelementptr float, ptr %out.vec.ptr, i64 2
+ store float %in.elt, ptr %out.elt2.ptr, align 8
+ %out.elt3.ptr = getelementptr float, ptr %out.vec.ptr, i64 3
+ store float %in.elt, ptr %out.elt3.ptr, align 4
+ %out.elt4.ptr = getelementptr float, ptr %out.vec.ptr, i64 4
+ store float %in.elt, ptr %out.elt4.ptr, align 16
+ %out.elt5.ptr = getelementptr float, ptr %out.vec.ptr, i64 5
+ store float %in.elt, ptr %out.elt5.ptr, align 4
+ %out.elt6.ptr = getelementptr float, ptr %out.vec.ptr, i64 6
+ store float %in.elt, ptr %out.elt6.ptr, align 8
+ %out.elt7.ptr = getelementptr float, ptr %out.vec.ptr, i64 7
+ store float %in.elt, ptr %out.elt7.ptr, align 4
+ ret void
+}
+
+; 256-bit "vector" of i64: load one i64, NOT it, store it to offsets 0,8,16,24.
+define void @vec256_i64(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec256_i64:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rax, 8(%rsi)
+; ALL-NEXT: movq %rax, 16(%rsi)
+; ALL-NEXT: movq %rax, 24(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+ %in.elt = xor i64 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i64, ptr %out.vec.ptr, i64 0
+ store i64 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i64, ptr %out.vec.ptr, i64 1
+ store i64 %in.elt, ptr %out.elt1.ptr, align 8
+ %out.elt2.ptr = getelementptr i64, ptr %out.vec.ptr, i64 2
+ store i64 %in.elt, ptr %out.elt2.ptr, align 16
+ %out.elt3.ptr = getelementptr i64, ptr %out.vec.ptr, i64 3
+ store i64 %in.elt, ptr %out.elt3.ptr, align 8
+ ret void
+}
+
+; Double variant of vec256_i64: integer-domain NOT precedes the double bitcast,
+; so all four stores are GPR movq's.
+define void @vec256_double(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec256_double:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rax, 8(%rsi)
+; ALL-NEXT: movq %rax, 16(%rsi)
+; ALL-NEXT: movq %rax, 24(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+ %in.elt.int = xor i64 %in.elt.not, -1
+ %in.elt = bitcast i64 %in.elt.int to double
+ %out.elt0.ptr = getelementptr double, ptr %out.vec.ptr, i64 0
+ store double %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr double, ptr %out.vec.ptr, i64 1
+ store double %in.elt, ptr %out.elt1.ptr, align 8
+ %out.elt2.ptr = getelementptr double, ptr %out.vec.ptr, i64 2
+ store double %in.elt, ptr %out.elt2.ptr, align 16
+ %out.elt3.ptr = getelementptr double, ptr %out.vec.ptr, i64 3
+ store double %in.elt, ptr %out.elt3.ptr, align 8
+ ret void
+}
+
+; 256-bit "vector" of i128: the element is split into two 64-bit halves, each
+; NOT'd and stored twice (element copies at offsets 0 and 16).
+define void @vec256_i128(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec256_i128:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: movq 8(%rdi), %rcx
+; ALL-NEXT: notq %rcx
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rcx, 8(%rsi)
+; ALL-NEXT: movq %rcx, 24(%rsi)
+; ALL-NEXT: movq %rax, 16(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i128, ptr %in.elt.ptr, align 64
+ %in.elt = xor i128 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i128, ptr %out.vec.ptr, i64 0
+ store i128 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i128, ptr %out.vec.ptr, i64 1
+ store i128 %in.elt, ptr %out.elt1.ptr, align 16
+ ret void
+}
+
+; 384-bit "vector": load one i8, NOT it, splat-store it to byte offsets 0..47.
+define void @vec384_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec384_i8:
+; ALL: # %bb.0:
+; ALL-NEXT: movzbl (%rdi), %eax
+; ALL-NEXT: notb %al
+; ALL-NEXT: movb %al, (%rsi)
+; ALL-NEXT: movb %al, 1(%rsi)
+; ALL-NEXT: movb %al, 2(%rsi)
+; ALL-NEXT: movb %al, 3(%rsi)
+; ALL-NEXT: movb %al, 4(%rsi)
+; ALL-NEXT: movb %al, 5(%rsi)
+; ALL-NEXT: movb %al, 6(%rsi)
+; ALL-NEXT: movb %al, 7(%rsi)
+; ALL-NEXT: movb %al, 8(%rsi)
+; ALL-NEXT: movb %al, 9(%rsi)
+; ALL-NEXT: movb %al, 10(%rsi)
+; ALL-NEXT: movb %al, 11(%rsi)
+; ALL-NEXT: movb %al, 12(%rsi)
+; ALL-NEXT: movb %al, 13(%rsi)
+; ALL-NEXT: movb %al, 14(%rsi)
+; ALL-NEXT: movb %al, 15(%rsi)
+; ALL-NEXT: movb %al, 16(%rsi)
+; ALL-NEXT: movb %al, 17(%rsi)
+; ALL-NEXT: movb %al, 18(%rsi)
+; ALL-NEXT: movb %al, 19(%rsi)
+; ALL-NEXT: movb %al, 20(%rsi)
+; ALL-NEXT: movb %al, 21(%rsi)
+; ALL-NEXT: movb %al, 22(%rsi)
+; ALL-NEXT: movb %al, 23(%rsi)
+; ALL-NEXT: movb %al, 24(%rsi)
+; ALL-NEXT: movb %al, 25(%rsi)
+; ALL-NEXT: movb %al, 26(%rsi)
+; ALL-NEXT: movb %al, 27(%rsi)
+; ALL-NEXT: movb %al, 28(%rsi)
+; ALL-NEXT: movb %al, 29(%rsi)
+; ALL-NEXT: movb %al, 30(%rsi)
+; ALL-NEXT: movb %al, 31(%rsi)
+; ALL-NEXT: movb %al, 32(%rsi)
+; ALL-NEXT: movb %al, 33(%rsi)
+; ALL-NEXT: movb %al, 34(%rsi)
+; ALL-NEXT: movb %al, 35(%rsi)
+; ALL-NEXT: movb %al, 36(%rsi)
+; ALL-NEXT: movb %al, 37(%rsi)
+; ALL-NEXT: movb %al, 38(%rsi)
+; ALL-NEXT: movb %al, 39(%rsi)
+; ALL-NEXT: movb %al, 40(%rsi)
+; ALL-NEXT: movb %al, 41(%rsi)
+; ALL-NEXT: movb %al, 42(%rsi)
+; ALL-NEXT: movb %al, 43(%rsi)
+; ALL-NEXT: movb %al, 44(%rsi)
+; ALL-NEXT: movb %al, 45(%rsi)
+; ALL-NEXT: movb %al, 46(%rsi)
+; ALL-NEXT: movb %al, 47(%rsi)
+; ALL-NEXT: retq
+ %in.elt.not = load i8, ptr %in.elt.ptr, align 64
+ %in.elt = xor i8 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i8, ptr %out.vec.ptr, i64 0
+ store i8 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i8, ptr %out.vec.ptr, i64 1
+ store i8 %in.elt, ptr %out.elt1.ptr, align 1
+ %out.elt2.ptr = getelementptr i8, ptr %out.vec.ptr, i64 2
+ store i8 %in.elt, ptr %out.elt2.ptr, align 2
+ %out.elt3.ptr = getelementptr i8, ptr %out.vec.ptr, i64 3
+ store i8 %in.elt, ptr %out.elt3.ptr, align 1
+ %out.elt4.ptr = getelementptr i8, ptr %out.vec.ptr, i64 4
+ store i8 %in.elt, ptr %out.elt4.ptr, align 4
+ %out.elt5.ptr = getelementptr i8, ptr %out.vec.ptr, i64 5
+ store i8 %in.elt, ptr %out.elt5.ptr, align 1
+ %out.elt6.ptr = getelementptr i8, ptr %out.vec.ptr, i64 6
+ store i8 %in.elt, ptr %out.elt6.ptr, align 2
+ %out.elt7.ptr = getelementptr i8, ptr %out.vec.ptr, i64 7
+ store i8 %in.elt, ptr %out.elt7.ptr, align 1
+ %out.elt8.ptr = getelementptr i8, ptr %out.vec.ptr, i64 8
+ store i8 %in.elt, ptr %out.elt8.ptr, align 8
+ %out.elt9.ptr = getelementptr i8, ptr %out.vec.ptr, i64 9
+ store i8 %in.elt, ptr %out.elt9.ptr, align 1
+ %out.elt10.ptr = getelementptr i8, ptr %out.vec.ptr, i64 10
+ store i8 %in.elt, ptr %out.elt10.ptr, align 2
+ %out.elt11.ptr = getelementptr i8, ptr %out.vec.ptr, i64 11
+ store i8 %in.elt, ptr %out.elt11.ptr, align 1
+ %out.elt12.ptr = getelementptr i8, ptr %out.vec.ptr, i64 12
+ store i8 %in.elt, ptr %out.elt12.ptr, align 4
+ %out.elt13.ptr = getelementptr i8, ptr %out.vec.ptr, i64 13
+ store i8 %in.elt, ptr %out.elt13.ptr, align 1
+ %out.elt14.ptr = getelementptr i8, ptr %out.vec.ptr, i64 14
+ store i8 %in.elt, ptr %out.elt14.ptr, align 2
+ %out.elt15.ptr = getelementptr i8, ptr %out.vec.ptr, i64 15
+ store i8 %in.elt, ptr %out.elt15.ptr, align 1
+ %out.elt16.ptr = getelementptr i8, ptr %out.vec.ptr, i64 16
+ store i8 %in.elt, ptr %out.elt16.ptr, align 16
+ %out.elt17.ptr = getelementptr i8, ptr %out.vec.ptr, i64 17
+ store i8 %in.elt, ptr %out.elt17.ptr, align 1
+ %out.elt18.ptr = getelementptr i8, ptr %out.vec.ptr, i64 18
+ store i8 %in.elt, ptr %out.elt18.ptr, align 2
+ %out.elt19.ptr = getelementptr i8, ptr %out.vec.ptr, i64 19
+ store i8 %in.elt, ptr %out.elt19.ptr, align 1
+ %out.elt20.ptr = getelementptr i8, ptr %out.vec.ptr, i64 20
+ store i8 %in.elt, ptr %out.elt20.ptr, align 4
+ %out.elt21.ptr = getelementptr i8, ptr %out.vec.ptr, i64 21
+ store i8 %in.elt, ptr %out.elt21.ptr, align 1
+ %out.elt22.ptr = getelementptr i8, ptr %out.vec.ptr, i64 22
+ store i8 %in.elt, ptr %out.elt22.ptr, align 2
+ %out.elt23.ptr = getelementptr i8, ptr %out.vec.ptr, i64 23
+ store i8 %in.elt, ptr %out.elt23.ptr, align 1
+ %out.elt24.ptr = getelementptr i8, ptr %out.vec.ptr, i64 24
+ store i8 %in.elt, ptr %out.elt24.ptr, align 8
+ %out.elt25.ptr = getelementptr i8, ptr %out.vec.ptr, i64 25
+ store i8 %in.elt, ptr %out.elt25.ptr, align 1
+ %out.elt26.ptr = getelementptr i8, ptr %out.vec.ptr, i64 26
+ store i8 %in.elt, ptr %out.elt26.ptr, align 2
+ %out.elt27.ptr = getelementptr i8, ptr %out.vec.ptr, i64 27
+ store i8 %in.elt, ptr %out.elt27.ptr, align 1
+ %out.elt28.ptr = getelementptr i8, ptr %out.vec.ptr, i64 28
+ store i8 %in.elt, ptr %out.elt28.ptr, align 4
+ %out.elt29.ptr = getelementptr i8, ptr %out.vec.ptr, i64 29
+ store i8 %in.elt, ptr %out.elt29.ptr, align 1
+ %out.elt30.ptr = getelementptr i8, ptr %out.vec.ptr, i64 30
+ store i8 %in.elt, ptr %out.elt30.ptr, align 2
+ %out.elt31.ptr = getelementptr i8, ptr %out.vec.ptr, i64 31
+ store i8 %in.elt, ptr %out.elt31.ptr, align 1
+ %out.elt32.ptr = getelementptr i8, ptr %out.vec.ptr, i64 32
+ store i8 %in.elt, ptr %out.elt32.ptr, align 32
+ %out.elt33.ptr = getelementptr i8, ptr %out.vec.ptr, i64 33
+ store i8 %in.elt, ptr %out.elt33.ptr, align 1
+ %out.elt34.ptr = getelementptr i8, ptr %out.vec.ptr, i64 34
+ store i8 %in.elt, ptr %out.elt34.ptr, align 2
+ %out.elt35.ptr = getelementptr i8, ptr %out.vec.ptr, i64 35
+ store i8 %in.elt, ptr %out.elt35.ptr, align 1
+ %out.elt36.ptr = getelementptr i8, ptr %out.vec.ptr, i64 36
+ store i8 %in.elt, ptr %out.elt36.ptr, align 4
+ %out.elt37.ptr = getelementptr i8, ptr %out.vec.ptr, i64 37
+ store i8 %in.elt, ptr %out.elt37.ptr, align 1
+ %out.elt38.ptr = getelementptr i8, ptr %out.vec.ptr, i64 38
+ store i8 %in.elt, ptr %out.elt38.ptr, align 2
+ %out.elt39.ptr = getelementptr i8, ptr %out.vec.ptr, i64 39
+ store i8 %in.elt, ptr %out.elt39.ptr, align 1
+ %out.elt40.ptr = getelementptr i8, ptr %out.vec.ptr, i64 40
+ store i8 %in.elt, ptr %out.elt40.ptr, align 8
+ %out.elt41.ptr = getelementptr i8, ptr %out.vec.ptr, i64 41
+ store i8 %in.elt, ptr %out.elt41.ptr, align 1
+ %out.elt42.ptr = getelementptr i8, ptr %out.vec.ptr, i64 42
+ store i8 %in.elt, ptr %out.elt42.ptr, align 2
+ %out.elt43.ptr = getelementptr i8, ptr %out.vec.ptr, i64 43
+ store i8 %in.elt, ptr %out.elt43.ptr, align 1
+ %out.elt44.ptr = getelementptr i8, ptr %out.vec.ptr, i64 44
+ store i8 %in.elt, ptr %out.elt44.ptr, align 4
+ %out.elt45.ptr = getelementptr i8, ptr %out.vec.ptr, i64 45
+ store i8 %in.elt, ptr %out.elt45.ptr, align 1
+ %out.elt46.ptr = getelementptr i8, ptr %out.vec.ptr, i64 46
+ store i8 %in.elt, ptr %out.elt46.ptr, align 2
+ %out.elt47.ptr = getelementptr i8, ptr %out.vec.ptr, i64 47
+ store i8 %in.elt, ptr %out.elt47.ptr, align 1
+ ret void
+}
+
+define void @vec384_i16(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec384_i16:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movw %ax, (%rsi)
+; ALL-NEXT: movw %ax, 2(%rsi)
+; ALL-NEXT: movw %ax, 4(%rsi)
+; ALL-NEXT: movw %ax, 6(%rsi)
+; ALL-NEXT: movw %ax, 8(%rsi)
+; ALL-NEXT: movw %ax, 10(%rsi)
+; ALL-NEXT: movw %ax, 12(%rsi)
+; ALL-NEXT: movw %ax, 14(%rsi)
+; ALL-NEXT: movw %ax, 16(%rsi)
+; ALL-NEXT: movw %ax, 18(%rsi)
+; ALL-NEXT: movw %ax, 20(%rsi)
+; ALL-NEXT: movw %ax, 22(%rsi)
+; ALL-NEXT: movw %ax, 24(%rsi)
+; ALL-NEXT: movw %ax, 26(%rsi)
+; ALL-NEXT: movw %ax, 28(%rsi)
+; ALL-NEXT: movw %ax, 30(%rsi)
+; ALL-NEXT: movw %ax, 32(%rsi)
+; ALL-NEXT: movw %ax, 34(%rsi)
+; ALL-NEXT: movw %ax, 36(%rsi)
+; ALL-NEXT: movw %ax, 38(%rsi)
+; ALL-NEXT: movw %ax, 40(%rsi)
+; ALL-NEXT: movw %ax, 42(%rsi)
+; ALL-NEXT: movw %ax, 44(%rsi)
+; ALL-NEXT: movw %ax, 46(%rsi)
+; ALL-NEXT: retq
+ ; Load one i16 (over-aligned to 64), bitwise-NOT it, and splat it into 24
+ ; consecutive i16 slots (48 bytes = 384 bits). Each store's alignment is the
+ ; largest power of two dividing its byte offset (capped at the 64-byte base),
+ ; exercising store merging/splat-store formation across mixed alignments.
+ %in.elt.not = load i16, ptr %in.elt.ptr, align 64
+ %in.elt = xor i16 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i16, ptr %out.vec.ptr, i64 0
+ store i16 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i16, ptr %out.vec.ptr, i64 1
+ store i16 %in.elt, ptr %out.elt1.ptr, align 2
+ %out.elt2.ptr = getelementptr i16, ptr %out.vec.ptr, i64 2
+ store i16 %in.elt, ptr %out.elt2.ptr, align 4
+ %out.elt3.ptr = getelementptr i16, ptr %out.vec.ptr, i64 3
+ store i16 %in.elt, ptr %out.elt3.ptr, align 2
+ %out.elt4.ptr = getelementptr i16, ptr %out.vec.ptr, i64 4
+ store i16 %in.elt, ptr %out.elt4.ptr, align 8
+ %out.elt5.ptr = getelementptr i16, ptr %out.vec.ptr, i64 5
+ store i16 %in.elt, ptr %out.elt5.ptr, align 2
+ %out.elt6.ptr = getelementptr i16, ptr %out.vec.ptr, i64 6
+ store i16 %in.elt, ptr %out.elt6.ptr, align 4
+ %out.elt7.ptr = getelementptr i16, ptr %out.vec.ptr, i64 7
+ store i16 %in.elt, ptr %out.elt7.ptr, align 2
+ %out.elt8.ptr = getelementptr i16, ptr %out.vec.ptr, i64 8
+ store i16 %in.elt, ptr %out.elt8.ptr, align 16
+ %out.elt9.ptr = getelementptr i16, ptr %out.vec.ptr, i64 9
+ store i16 %in.elt, ptr %out.elt9.ptr, align 2
+ %out.elt10.ptr = getelementptr i16, ptr %out.vec.ptr, i64 10
+ store i16 %in.elt, ptr %out.elt10.ptr, align 4
+ %out.elt11.ptr = getelementptr i16, ptr %out.vec.ptr, i64 11
+ store i16 %in.elt, ptr %out.elt11.ptr, align 2
+ %out.elt12.ptr = getelementptr i16, ptr %out.vec.ptr, i64 12
+ store i16 %in.elt, ptr %out.elt12.ptr, align 8
+ %out.elt13.ptr = getelementptr i16, ptr %out.vec.ptr, i64 13
+ store i16 %in.elt, ptr %out.elt13.ptr, align 2
+ %out.elt14.ptr = getelementptr i16, ptr %out.vec.ptr, i64 14
+ store i16 %in.elt, ptr %out.elt14.ptr, align 4
+ %out.elt15.ptr = getelementptr i16, ptr %out.vec.ptr, i64 15
+ store i16 %in.elt, ptr %out.elt15.ptr, align 2
+ %out.elt16.ptr = getelementptr i16, ptr %out.vec.ptr, i64 16
+ store i16 %in.elt, ptr %out.elt16.ptr, align 32
+ %out.elt17.ptr = getelementptr i16, ptr %out.vec.ptr, i64 17
+ store i16 %in.elt, ptr %out.elt17.ptr, align 2
+ %out.elt18.ptr = getelementptr i16, ptr %out.vec.ptr, i64 18
+ store i16 %in.elt, ptr %out.elt18.ptr, align 4
+ %out.elt19.ptr = getelementptr i16, ptr %out.vec.ptr, i64 19
+ store i16 %in.elt, ptr %out.elt19.ptr, align 2
+ %out.elt20.ptr = getelementptr i16, ptr %out.vec.ptr, i64 20
+ store i16 %in.elt, ptr %out.elt20.ptr, align 8
+ %out.elt21.ptr = getelementptr i16, ptr %out.vec.ptr, i64 21
+ store i16 %in.elt, ptr %out.elt21.ptr, align 2
+ %out.elt22.ptr = getelementptr i16, ptr %out.vec.ptr, i64 22
+ store i16 %in.elt, ptr %out.elt22.ptr, align 4
+ %out.elt23.ptr = getelementptr i16, ptr %out.vec.ptr, i64 23
+ store i16 %in.elt, ptr %out.elt23.ptr, align 2
+ ret void
+}
+
+define void @vec384_i32(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec384_i32:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: movl %eax, 8(%rsi)
+; ALL-NEXT: movl %eax, 12(%rsi)
+; ALL-NEXT: movl %eax, 16(%rsi)
+; ALL-NEXT: movl %eax, 20(%rsi)
+; ALL-NEXT: movl %eax, 24(%rsi)
+; ALL-NEXT: movl %eax, 28(%rsi)
+; ALL-NEXT: movl %eax, 32(%rsi)
+; ALL-NEXT: movl %eax, 36(%rsi)
+; ALL-NEXT: movl %eax, 40(%rsi)
+; ALL-NEXT: movl %eax, 44(%rsi)
+; ALL-NEXT: retq
+ ; Load one i32 (over-aligned to 64), bitwise-NOT it, and splat it into 12
+ ; consecutive i32 slots (48 bytes = 384 bits). Store alignments are the
+ ; natural alignment of each byte offset within the 64-byte-aligned base.
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt = xor i32 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i32, ptr %out.vec.ptr, i64 0
+ store i32 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i32, ptr %out.vec.ptr, i64 1
+ store i32 %in.elt, ptr %out.elt1.ptr, align 4
+ %out.elt2.ptr = getelementptr i32, ptr %out.vec.ptr, i64 2
+ store i32 %in.elt, ptr %out.elt2.ptr, align 8
+ %out.elt3.ptr = getelementptr i32, ptr %out.vec.ptr, i64 3
+ store i32 %in.elt, ptr %out.elt3.ptr, align 4
+ %out.elt4.ptr = getelementptr i32, ptr %out.vec.ptr, i64 4
+ store i32 %in.elt, ptr %out.elt4.ptr, align 16
+ %out.elt5.ptr = getelementptr i32, ptr %out.vec.ptr, i64 5
+ store i32 %in.elt, ptr %out.elt5.ptr, align 4
+ %out.elt6.ptr = getelementptr i32, ptr %out.vec.ptr, i64 6
+ store i32 %in.elt, ptr %out.elt6.ptr, align 8
+ %out.elt7.ptr = getelementptr i32, ptr %out.vec.ptr, i64 7
+ store i32 %in.elt, ptr %out.elt7.ptr, align 4
+ %out.elt8.ptr = getelementptr i32, ptr %out.vec.ptr, i64 8
+ store i32 %in.elt, ptr %out.elt8.ptr, align 32
+ %out.elt9.ptr = getelementptr i32, ptr %out.vec.ptr, i64 9
+ store i32 %in.elt, ptr %out.elt9.ptr, align 4
+ %out.elt10.ptr = getelementptr i32, ptr %out.vec.ptr, i64 10
+ store i32 %in.elt, ptr %out.elt10.ptr, align 8
+ %out.elt11.ptr = getelementptr i32, ptr %out.vec.ptr, i64 11
+ store i32 %in.elt, ptr %out.elt11.ptr, align 4
+ ret void
+}
+
+define void @vec384_float(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec384_float:
+; ALL: # %bb.0:
+; ALL-NEXT: movl (%rdi), %eax
+; ALL-NEXT: notl %eax
+; ALL-NEXT: movl %eax, (%rsi)
+; ALL-NEXT: movl %eax, 4(%rsi)
+; ALL-NEXT: movl %eax, 8(%rsi)
+; ALL-NEXT: movl %eax, 12(%rsi)
+; ALL-NEXT: movl %eax, 16(%rsi)
+; ALL-NEXT: movl %eax, 20(%rsi)
+; ALL-NEXT: movl %eax, 24(%rsi)
+; ALL-NEXT: movl %eax, 28(%rsi)
+; ALL-NEXT: movl %eax, 32(%rsi)
+; ALL-NEXT: movl %eax, 36(%rsi)
+; ALL-NEXT: movl %eax, 40(%rsi)
+; ALL-NEXT: movl %eax, 44(%rsi)
+; ALL-NEXT: retq
+ ; Same pattern as vec384_i32, but the inverted i32 is bitcast to float before
+ ; the 12 splat stores — checks the FP-typed splat lowers to the same integer
+ ; movl sequence (no SSE register round-trip on the scalar path).
+ %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+ %in.elt.int = xor i32 %in.elt.not, -1
+ %in.elt = bitcast i32 %in.elt.int to float
+ %out.elt0.ptr = getelementptr float, ptr %out.vec.ptr, i64 0
+ store float %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr float, ptr %out.vec.ptr, i64 1
+ store float %in.elt, ptr %out.elt1.ptr, align 4
+ %out.elt2.ptr = getelementptr float, ptr %out.vec.ptr, i64 2
+ store float %in.elt, ptr %out.elt2.ptr, align 8
+ %out.elt3.ptr = getelementptr float, ptr %out.vec.ptr, i64 3
+ store float %in.elt, ptr %out.elt3.ptr, align 4
+ %out.elt4.ptr = getelementptr float, ptr %out.vec.ptr, i64 4
+ store float %in.elt, ptr %out.elt4.ptr, align 16
+ %out.elt5.ptr = getelementptr float, ptr %out.vec.ptr, i64 5
+ store float %in.elt, ptr %out.elt5.ptr, align 4
+ %out.elt6.ptr = getelementptr float, ptr %out.vec.ptr, i64 6
+ store float %in.elt, ptr %out.elt6.ptr, align 8
+ %out.elt7.ptr = getelementptr float, ptr %out.vec.ptr, i64 7
+ store float %in.elt, ptr %out.elt7.ptr, align 4
+ %out.elt8.ptr = getelementptr float, ptr %out.vec.ptr, i64 8
+ store float %in.elt, ptr %out.elt8.ptr, align 32
+ %out.elt9.ptr = getelementptr float, ptr %out.vec.ptr, i64 9
+ store float %in.elt, ptr %out.elt9.ptr, align 4
+ %out.elt10.ptr = getelementptr float, ptr %out.vec.ptr, i64 10
+ store float %in.elt, ptr %out.elt10.ptr, align 8
+ %out.elt11.ptr = getelementptr float, ptr %out.vec.ptr, i64 11
+ store float %in.elt, ptr %out.elt11.ptr, align 4
+ ret void
+}
+
+define void @vec384_i64(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec384_i64:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rax, 8(%rsi)
+; ALL-NEXT: movq %rax, 16(%rsi)
+; ALL-NEXT: movq %rax, 24(%rsi)
+; ALL-NEXT: movq %rax, 32(%rsi)
+; ALL-NEXT: movq %rax, 40(%rsi)
+; ALL-NEXT: retq
+ ; Load one i64 (over-aligned to 64), bitwise-NOT it, and splat it into 6
+ ; consecutive i64 slots (48 bytes = 384 bits), with per-offset alignments.
+ %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+ %in.elt = xor i64 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i64, ptr %out.vec.ptr, i64 0
+ store i64 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i64, ptr %out.vec.ptr, i64 1
+ store i64 %in.elt, ptr %out.elt1.ptr, align 8
+ %out.elt2.ptr = getelementptr i64, ptr %out.vec.ptr, i64 2
+ store i64 %in.elt, ptr %out.elt2.ptr, align 16
+ %out.elt3.ptr = getelementptr i64, ptr %out.vec.ptr, i64 3
+ store i64 %in.elt, ptr %out.elt3.ptr, align 8
+ %out.elt4.ptr = getelementptr i64, ptr %out.vec.ptr, i64 4
+ store i64 %in.elt, ptr %out.elt4.ptr, align 32
+ %out.elt5.ptr = getelementptr i64, ptr %out.vec.ptr, i64 5
+ store i64 %in.elt, ptr %out.elt5.ptr, align 8
+ ret void
+}
+
+define void @vec384_double(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec384_double:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rax, 8(%rsi)
+; ALL-NEXT: movq %rax, 16(%rsi)
+; ALL-NEXT: movq %rax, 24(%rsi)
+; ALL-NEXT: movq %rax, 32(%rsi)
+; ALL-NEXT: movq %rax, 40(%rsi)
+; ALL-NEXT: retq
+ ; Same pattern as vec384_i64, but the inverted i64 is bitcast to double
+ ; before the 6 splat stores — the FP-typed splat should lower to the same
+ ; integer movq sequence.
+ %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+ %in.elt.int = xor i64 %in.elt.not, -1
+ %in.elt = bitcast i64 %in.elt.int to double
+ %out.elt0.ptr = getelementptr double, ptr %out.vec.ptr, i64 0
+ store double %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr double, ptr %out.vec.ptr, i64 1
+ store double %in.elt, ptr %out.elt1.ptr, align 8
+ %out.elt2.ptr = getelementptr double, ptr %out.vec.ptr, i64 2
+ store double %in.elt, ptr %out.elt2.ptr, align 16
+ %out.elt3.ptr = getelementptr double, ptr %out.vec.ptr, i64 3
+ store double %in.elt, ptr %out.elt3.ptr, align 8
+ %out.elt4.ptr = getelementptr double, ptr %out.vec.ptr, i64 4
+ store double %in.elt, ptr %out.elt4.ptr, align 32
+ %out.elt5.ptr = getelementptr double, ptr %out.vec.ptr, i64 5
+ store double %in.elt, ptr %out.elt5.ptr, align 8
+ ret void
+}
+
+define void @vec384_i128(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec384_i128:
+; ALL: # %bb.0:
+; ALL-NEXT: movq (%rdi), %rax
+; ALL-NEXT: movq 8(%rdi), %rcx
+; ALL-NEXT: notq %rcx
+; ALL-NEXT: notq %rax
+; ALL-NEXT: movq %rax, (%rsi)
+; ALL-NEXT: movq %rcx, 8(%rsi)
+; ALL-NEXT: movq %rcx, 24(%rsi)
+; ALL-NEXT: movq %rax, 16(%rsi)
+; ALL-NEXT: movq %rcx, 40(%rsi)
+; ALL-NEXT: movq %rax, 32(%rsi)
+; ALL-NEXT: retq
+ ; Load one i128 (over-aligned to 64), bitwise-NOT it, and splat it into 3
+ ; consecutive i128 slots (48 bytes = 384 bits). The i128 is split into two
+ ; 64-bit halves on the scalar path, hence the paired movq stores per slot.
+ %in.elt.not = load i128, ptr %in.elt.ptr, align 64
+ %in.elt = xor i128 %in.elt.not, -1
+ %out.elt0.ptr = getelementptr i128, ptr %out.vec.ptr, i64 0
+ store i128 %in.elt, ptr %out.elt0.ptr, align 64
+ %out.elt1.ptr = getelementptr i128, ptr %out.vec.ptr, i64 1
+ store i128 %in.elt, ptr %out.elt1.ptr, align 16
+ %out.elt2.ptr = getelementptr i128, ptr %out.vec.ptr, i64 2
+ store i128 %in.elt, ptr %out.elt2.ptr, align 32
+ ret void
+}
+
define void @vec512_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
; ALL-LABEL: vec512_i8:
; ALL: # %bb.0:
%in.elt.not = load i64, ptr %in.elt.ptr, align 64
%in.elt.int = xor i64 %in.elt.not, -1
%in.elt = bitcast i64 %in.elt.int to double
- %out.elt0.ptr = getelementptr i64, ptr %out.vec.ptr, i64 0
+ %out.elt0.ptr = getelementptr double, ptr %out.vec.ptr, i64 0
store double %in.elt, ptr %out.elt0.ptr, align 64
- %out.elt1.ptr = getelementptr i64, ptr %out.vec.ptr, i64 1
+ %out.elt1.ptr = getelementptr double, ptr %out.vec.ptr, i64 1
store double %in.elt, ptr %out.elt1.ptr, align 8
- %out.elt2.ptr = getelementptr i64, ptr %out.vec.ptr, i64 2
+ %out.elt2.ptr = getelementptr double, ptr %out.vec.ptr, i64 2
store double %in.elt, ptr %out.elt2.ptr, align 16
- %out.elt3.ptr = getelementptr i64, ptr %out.vec.ptr, i64 3
+ %out.elt3.ptr = getelementptr double, ptr %out.vec.ptr, i64 3
store double %in.elt, ptr %out.elt3.ptr, align 8
- %out.elt4.ptr = getelementptr i64, ptr %out.vec.ptr, i64 4
+ %out.elt4.ptr = getelementptr double, ptr %out.vec.ptr, i64 4
store double %in.elt, ptr %out.elt4.ptr, align 32
- %out.elt5.ptr = getelementptr i64, ptr %out.vec.ptr, i64 5
+ %out.elt5.ptr = getelementptr double, ptr %out.vec.ptr, i64 5
store double %in.elt, ptr %out.elt5.ptr, align 8
- %out.elt6.ptr = getelementptr i64, ptr %out.vec.ptr, i64 6
+ %out.elt6.ptr = getelementptr double, ptr %out.vec.ptr, i64 6
store double %in.elt, ptr %out.elt6.ptr, align 16
- %out.elt7.ptr = getelementptr i64, ptr %out.vec.ptr, i64 7
+ %out.elt7.ptr = getelementptr double, ptr %out.vec.ptr, i64 7
store double %in.elt, ptr %out.elt7.ptr, align 8
ret void
}
; AVX512: {{.*}}
; AVX512BW: {{.*}}
; AVX512F: {{.*}}
+; SCALAR: {{.*}}
; SSE: {{.*}}
; SSE2: {{.*}}
+; SSE2-ONLY: {{.*}}
; SSE3: {{.*}}
; SSE41: {{.*}}
; SSE42: {{.*}}
; SSSE3: {{.*}}
+; SSSE3-ONLY: {{.*}}