  ret <4 x i32> %r
}
+; Code and data size may increase by using more vector ops, so the transform is disabled here.
+
define <4 x i32> @add_op1_constant_optsize(i32* %p) nounwind optsize {
; SSE-LABEL: add_op1_constant_optsize:
; SSE:       # %bb.0:
  ret <2 x double> %r
}
+; Try again with 256-bit types.
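+; Only the low element of the wider result is defined, so the expected code is
+; unchanged: do the math in a scalar register and move it into xmm0 with a
+; single movd/movq; no 256-bit instructions are required.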
+
+define <8 x i32> @add_op1_constant_v8i32(i32* %p) nounwind {
+; SSE-LABEL: add_op1_constant_v8i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl (%rdi), %eax
+; SSE-NEXT:    addl $42, %eax
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: add_op1_constant_v8i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl (%rdi), %eax
+; AVX-NEXT:    addl $42, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i32, i32* %p
+  %b = add i32 %x, 42
+  %r = insertelement <8 x i32> undef, i32 %b, i32 0
+  ret <8 x i32> %r
+}
+
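+; Sub is not commutative: with the constant as operand 0, the immediate is
+; materialized in a register first and the load is folded into the subq.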
+define <4 x i64> @sub_op0_constant_v4i64(i64* %p) nounwind {
+; SSE-LABEL: sub_op0_constant_v4i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl $42, %eax
+; SSE-NEXT:    subq (%rdi), %rax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sub_op0_constant_v4i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl $42, %eax
+; AVX-NEXT:    subq (%rdi), %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i64, i64* %p
+  %b = sub i64 42, %x
+  %r = insertelement <4 x i64> undef, i64 %b, i32 0
+  ret <4 x i64> %r
+}
+
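+; For mul, the load folds directly into a scalar imull with an immediate.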
+define <8 x i32> @mul_op1_constant_v8i32(i32* %p) nounwind {
+; SSE-LABEL: mul_op1_constant_v8i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    imull $42, (%rdi), %eax
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: mul_op1_constant_v8i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    imull $42, (%rdi), %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i32, i32* %p
+  %b = mul i32 %x, 42
+  %r = insertelement <8 x i32> undef, i32 %b, i32 0
+  ret <8 x i32> %r
+}
+
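+; Bitwise ops take the same path: scalar orq with the immediate, then one movq.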
+define <4 x i64> @or_op1_constant_v4i64(i64* %p) nounwind {
+; SSE-LABEL: or_op1_constant_v4i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq (%rdi), %rax
+; SSE-NEXT:    orq $42, %rax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: or_op1_constant_v4i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq (%rdi), %rax
+; AVX-NEXT:    orq $42, %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i64, i64* %p
+  %b = or i64 %x, 42
+  %r = insertelement <4 x i64> undef, i64 %b, i32 0
+  ret <4 x i64> %r
+}
+
+; Try again with 512-bit types.
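+; As with the 256-bit cases, only the scalar op and a single movd/movq are
+; expected; the upper lanes of the 512-bit result are undef.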
+
+define <16 x i32> @add_op1_constant_v16i32(i32* %p) nounwind {
+; SSE-LABEL: add_op1_constant_v16i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl (%rdi), %eax
+; SSE-NEXT:    addl $42, %eax
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: add_op1_constant_v16i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl (%rdi), %eax
+; AVX-NEXT:    addl $42, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i32, i32* %p
+  %b = add i32 %x, 42
+  %r = insertelement <16 x i32> undef, i32 %b, i32 0
+  ret <16 x i32> %r
+}
+
+define <8 x i64> @sub_op0_constant_v8i64(i64* %p) nounwind {
+; SSE-LABEL: sub_op0_constant_v8i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl $42, %eax
+; SSE-NEXT:    subq (%rdi), %rax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sub_op0_constant_v8i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl $42, %eax
+; AVX-NEXT:    subq (%rdi), %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i64, i64* %p
+  %b = sub i64 42, %x
+  %r = insertelement <8 x i64> undef, i64 %b, i32 0
+  ret <8 x i64> %r
+}
+
+define <16 x i32> @mul_op1_constant_v16i32(i32* %p) nounwind {
+; SSE-LABEL: mul_op1_constant_v16i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    imull $42, (%rdi), %eax
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: mul_op1_constant_v16i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    imull $42, (%rdi), %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i32, i32* %p
+  %b = mul i32 %x, 42
+  %r = insertelement <16 x i32> undef, i32 %b, i32 0
+  ret <16 x i32> %r
+}
+
+define <8 x i64> @or_op1_constant_v8i64(i64* %p) nounwind {
+; SSE-LABEL: or_op1_constant_v8i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movq (%rdi), %rax
+; SSE-NEXT:    orq $42, %rax
+; SSE-NEXT:    movq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: or_op1_constant_v8i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movq (%rdi), %rax
+; AVX-NEXT:    orq $42, %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    retq
+  %x = load i64, i64* %p
+  %b = or i64 %x, 42
+  %r = insertelement <8 x i64> undef, i64 %b, i32 0
+  ret <8 x i64> %r
+}
+