declare fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128, <1 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v3f32(float, <3 x float>)
+declare float @llvm.vector.reduce.fadd.f32.v5f32(float, <5 x float>)
declare fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128, <2 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v16f32(float, <16 x float>)
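+
+; These tests cover fully ordered (strict) fadd reductions: without the
+; 'reassoc' flag, llvm.vector.reduce.fadd must add the elements in order,
+; starting from the scalar start operand. The start value is passed in as an
+; argument (%s) so that it cannot be constant folded away.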
-define half @test_v1f16(<1 x half> %a) nounwind {
+define half @test_v1f16(<1 x half> %a, half %s) nounwind {
; CHECK-LABEL: test_v1f16:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: .save {r4, r5, r11, lr}
+; CHECK-NEXT: push {r4, r5, r11, lr}
+; CHECK-NEXT: .vsave {d8}
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl __aeabi_h2f
+; CHECK-NEXT: mov r5, r0
+; CHECK-NEXT: mov r0, r4
; CHECK-NEXT: bl __aeabi_f2h
+; CHECK-NEXT: vmov s16, r5
; CHECK-NEXT: bl __aeabi_h2f
-; CHECK-NEXT: vldr s0, .LCPI0_0
-; CHECK-NEXT: vmov s2, r0
-; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: vadd.f32 s0, s16, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl __aeabi_f2h
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r4, r5, r11, lr}
+; CHECK-NEXT: mov pc, lr
+ %b = call half @llvm.vector.reduce.fadd.f16.v1f16(half %s, <1 x half> %a)
+ ret half %b
+}
+
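+; The _neutral variants use a start value of -0.0, the identity element for
+; fadd, so the initial accumulate can fold away. For fp128 the identity is
+; spelled 0xL00000000000000008000000000000000 (the 0xL literal gives the low
+; 64 bits first, so the sign bit sits in the second group of digits).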
+define half @test_v1f16_neutral(<1 x half> %a) nounwind {
+; CHECK-LABEL: test_v1f16_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bl __aeabi_f2h
+; CHECK-NEXT: mov r1, #255
+; CHECK-NEXT: orr r1, r1, #65280
+; CHECK-NEXT: and r0, r0, r1
; CHECK-NEXT: pop {r11, lr}
; CHECK-NEXT: mov pc, lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI0_0:
-; CHECK-NEXT: .long 0x00000000 @ float 0
- %b = call half @llvm.vector.reduce.fadd.f16.v1f16(half 0.0, <1 x half> %a)
+ %b = call half @llvm.vector.reduce.fadd.f16.v1f16(half -0.0, <1 x half> %a)
ret half %b
}
-define float @test_v1f32(<1 x float> %a) nounwind {
+define float @test_v1f32(<1 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v1f32:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vldr s0, .LCPI1_0
-; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vmov s0, r0
+; CHECK-NEXT: vmov s2, r1
; CHECK-NEXT: vadd.f32 s0, s2, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI1_0:
-; CHECK-NEXT: .long 0x00000000 @ float 0
- %b = call float @llvm.vector.reduce.fadd.f32.v1f32(float 0.0, <1 x float> %a)
+ %b = call float @llvm.vector.reduce.fadd.f32.v1f32(float %s, <1 x float> %a)
ret float %b
}
-define double @test_v1f64(<1 x double> %a) nounwind {
+define float @test_v1f32_neutral(<1 x float> %a) nounwind {
+; CHECK-LABEL: test_v1f32_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov pc, lr
+ %b = call float @llvm.vector.reduce.fadd.f32.v1f32(float -0.0, <1 x float> %a)
+ ret float %b
+}
+
+define double @test_v1f64(<1 x double> %a, double %s) nounwind {
; CHECK-LABEL: test_v1f64:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov.i32 d16, #0x0
-; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vadd.f64 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
- %b = call double @llvm.vector.reduce.fadd.f64.v1f64(double 0.0, <1 x double> %a)
+ %b = call double @llvm.vector.reduce.fadd.f64.v1f64(double %s, <1 x double> %a)
ret double %b
}
-define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
+define double @test_v1f64_neutral(<1 x double> %a) nounwind {
+; CHECK-LABEL: test_v1f64_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov pc, lr
+ %b = call double @llvm.vector.reduce.fadd.f64.v1f64(double -0.0, <1 x double> %a)
+ ret double %b
+}
+
+define fp128 @test_v1f128(<1 x fp128> %a, fp128 %s) nounwind {
; CHECK-LABEL: test_v1f128:
; CHECK: @ %bb.0:
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: .save {r4, r5, r11, lr}
+; CHECK-NEXT: push {r4, r5, r11, lr}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: mov r12, #0
-; CHECK-NEXT: str r12, [sp]
-; CHECK-NEXT: str r12, [sp, #4]
-; CHECK-NEXT: str r12, [sp, #8]
-; CHECK-NEXT: str r12, [sp, #12]
+; CHECK-NEXT: ldr r12, [sp, #32]
+; CHECK-NEXT: ldr lr, [sp, #36]
+; CHECK-NEXT: ldr r4, [sp, #40]
+; CHECK-NEXT: ldr r5, [sp, #44]
+; CHECK-NEXT: stm sp, {r0, r1, r2, r3}
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: mov r1, lr
+; CHECK-NEXT: mov r2, r4
+; CHECK-NEXT: mov r3, r5
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: add sp, sp, #16
-; CHECK-NEXT: pop {r11, lr}
+; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
- %b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
+ %b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 %s, <1 x fp128> %a)
ret fp128 %b
}
-define float @test_v3f32(<3 x float> %a) nounwind {
+define fp128 @test_v1f128_neutral(<1 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v1f128_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: mov pc, lr
+ %b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 0xL00000000000000008000000000000000, <1 x fp128> %a)
+ ret fp128 %b
+}
+
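+; <3 x float> and <5 x float> exercise legalization of vectors with a
+; non-power-of-two number of elements.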
+define float @test_v3f32(<3 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v3f32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d3, r2, r3
-; CHECK-NEXT: vldr s0, .LCPI4_0
+; CHECK-NEXT: vldr s0, [sp]
; CHECK-NEXT: vmov d2, r0, r1
-; CHECK-NEXT: vadd.f32 s0, s4, s0
+; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s6
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI4_0:
-; CHECK-NEXT: .long 0x00000000 @ float 0
- %b = call float @llvm.vector.reduce.fadd.f32.v3f32(float 0.0, <3 x float> %a)
+ %b = call float @llvm.vector.reduce.fadd.f32.v3f32(float %s, <3 x float> %a)
+ ret float %b
+}
+
+define float @test_v3f32_neutral(<3 x float> %a) nounwind {
+; CHECK-LABEL: test_v3f32_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: vadd.f32 s4, s0, s1
+; CHECK-NEXT: vadd.f32 s0, s4, s2
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: mov pc, lr
+ %b = call float @llvm.vector.reduce.fadd.f32.v3f32(float -0.0, <3 x float> %a)
+ ret float %b
+}
+
+define float @test_v5f32(<5 x float> %a, float %s) nounwind {
+; CHECK-LABEL: test_v5f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vldr s0, [sp, #4]
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vmov s2, r1
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vmov s2, r2
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vmov s2, r3
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vldr s2, [sp]
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: mov pc, lr
+ %b = call float @llvm.vector.reduce.fadd.f32.v5f32(float %s, <5 x float> %a)
ret float %b
}
-define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
+define float @test_v5f32_neutral(<5 x float> %a) nounwind {
+; CHECK-LABEL: test_v5f32_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vmov s2, r2
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vmov s2, r3
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vldr s2, [sp]
+; CHECK-NEXT: vadd.f32 s0, s0, s2
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: mov pc, lr
+ %b = call float @llvm.vector.reduce.fadd.f32.v5f32(float -0.0, <5 x float> %a)
+ ret float %b
+}
+
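+; There is no hardware fp128 support here, so each accumulate step lowers to
+; an __addtf3 libcall, with the second operand passed on the stack.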
+define fp128 @test_v2f128(<2 x fp128> %a, fp128 %s) nounwind {
; CHECK-LABEL: test_v2f128:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, sp, #16
-; CHECK-NEXT: mov r12, #0
-; CHECK-NEXT: str r12, [sp]
-; CHECK-NEXT: str r12, [sp, #4]
-; CHECK-NEXT: str r12, [sp, #8]
-; CHECK-NEXT: str r12, [sp, #12]
+; CHECK-NEXT: ldr r12, [sp, #48]
+; CHECK-NEXT: ldr lr, [sp, #52]
+; CHECK-NEXT: ldr r4, [sp, #56]
+; CHECK-NEXT: ldr r5, [sp, #60]
+; CHECK-NEXT: stm sp, {r0, r1, r2, r3}
+; CHECK-NEXT: mov r0, r12
+; CHECK-NEXT: mov r1, lr
+; CHECK-NEXT: mov r2, r4
+; CHECK-NEXT: mov r3, r5
; CHECK-NEXT: bl __addtf3
+; CHECK-NEXT: ldr r4, [sp, #32]
+; CHECK-NEXT: ldr r5, [sp, #40]
+; CHECK-NEXT: ldr lr, [sp, #44]
+; CHECK-NEXT: ldr r12, [sp, #36]
+; CHECK-NEXT: stm sp, {r4, r12}
+; CHECK-NEXT: str r5, [sp, #8]
+; CHECK-NEXT: str lr, [sp, #12]
+; CHECK-NEXT: bl __addtf3
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: pop {r4, r5, r11, lr}
+; CHECK-NEXT: mov pc, lr
+ %b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 %s, <2 x fp128> %a)
+ ret fp128 %b
+}
+
+define fp128 @test_v2f128_neutral(<2 x fp128> %a) nounwind {
+; CHECK-LABEL: test_v2f128_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r11, lr}
+; CHECK-NEXT: push {r4, r5, r11, lr}
+; CHECK-NEXT: .pad #16
+; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr r12, [sp, #36]
; CHECK-NEXT: ldr lr, [sp, #32]
; CHECK-NEXT: ldr r4, [sp, #40]
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
- %b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
+ %b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 0xL00000000000000008000000000000000, <2 x fp128> %a)
ret fp128 %b
}
-define float @test_v16f32(<16 x float> %a) nounwind {
+define float @test_v16f32(<16 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v16f32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d3, r2, r3
-; CHECK-NEXT: vldr s0, .LCPI6_0
+; CHECK-NEXT: vldr s0, [sp, #48]
; CHECK-NEXT: vmov d2, r0, r1
; CHECK-NEXT: mov r0, sp
-; CHECK-NEXT: vadd.f32 s0, s4, s0
+; CHECK-NEXT: vadd.f32 s0, s0, s4
+; CHECK-NEXT: vadd.f32 s0, s0, s5
+; CHECK-NEXT: vadd.f32 s0, s0, s6
+; CHECK-NEXT: vadd.f32 s0, s0, s7
+; CHECK-NEXT: vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT: add r0, sp, #16
+; CHECK-NEXT: vadd.f32 s0, s0, s4
+; CHECK-NEXT: vadd.f32 s0, s0, s5
+; CHECK-NEXT: vadd.f32 s0, s0, s6
+; CHECK-NEXT: vadd.f32 s0, s0, s7
+; CHECK-NEXT: vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT: add r0, sp, #32
+; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s6
; CHECK-NEXT: vadd.f32 s0, s0, s7
; CHECK-NEXT: vld1.64 {d2, d3}, [r0]
+; CHECK-NEXT: vadd.f32 s0, s0, s4
+; CHECK-NEXT: vadd.f32 s0, s0, s5
+; CHECK-NEXT: vadd.f32 s0, s0, s6
+; CHECK-NEXT: vadd.f32 s0, s0, s7
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: mov pc, lr
+ %b = call float @llvm.vector.reduce.fadd.f32.v16f32(float %s, <16 x float> %a)
+ ret float %b
+}
+
+define float @test_v16f32_neutral(<16 x float> %a) nounwind {
+; CHECK-LABEL: test_v16f32_neutral:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: vmov d1, r2, r3
+; CHECK-NEXT: vmov d0, r0, r1
+; CHECK-NEXT: mov r0, sp
+; CHECK-NEXT: vadd.f32 s4, s0, s1
+; CHECK-NEXT: vadd.f32 s4, s4, s2
+; CHECK-NEXT: vadd.f32 s0, s4, s3
+; CHECK-NEXT: vld1.64 {d2, d3}, [r0]
; CHECK-NEXT: add r0, sp, #16
; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s7
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: .LCPI6_0:
-; CHECK-NEXT: .long 0x00000000 @ float 0
- %b = call float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a)
+ %b = call float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a)
ret float %b
}
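+; The x86 _zero tests below start from -0.0, the fadd identity, rather than
+; the +0.0 their names refer to, so the accumulator no longer has to be
+; zeroed first and the xorps/vxorps of the old checks disappears. The AVX
+; checks are also split into AVX1-SLOW/AVX1-FAST/AVX2: with fast horizontal
+; ops, the first accumulate pair becomes vhaddps/vhaddpd instead of a
+; shuffle plus a scalar add.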
define float @test_v2f32_zero(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_zero:
; SSE2: # %bb.0:
-; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; SSE2-NEXT: addss %xmm1, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32_zero:
; SSE41: # %bb.0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: addss %xmm0, %xmm1
-; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: test_v2f32_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v2f32_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v2f32_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v2f32_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f32_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
- %1 = call float @llvm.vector.reduce.fadd.f32.v2f32(float 0.0, <2 x float> %a0)
+ %1 = call float @llvm.vector.reduce.fadd.f32.v2f32(float -0.0, <2 x float> %a0)
ret float %1
}
define float @test_v4f32_zero(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_zero:
; SSE2: # %bb.0:
-; SSE2-NEXT: xorps %xmm1, %xmm1
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE2-NEXT: addss %xmm1, %xmm2
-; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: addss %xmm1, %xmm0
+; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32_zero:
; SSE41: # %bb.0:
-; SSE41-NEXT: xorps %xmm1, %xmm1
+; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
-; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; SSE41-NEXT: movaps %xmm0, %xmm2
+; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE41-NEXT: addss %xmm1, %xmm2
-; SSE41-NEXT: movaps %xmm0, %xmm1
-; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: addss %xmm1, %xmm0
+; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: test_v4f32_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v4f32_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v4f32_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v4f32_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX2-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f32_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
- %1 = call float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %a0)
+ %1 = call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a0)
ret float %1
}
define float @test_v8f32_zero(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_zero:
; SSE2: # %bb.0:
-; SSE2-NEXT: xorps %xmm2, %xmm2
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1]
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE2-NEXT: addss %xmm2, %xmm3
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE2-NEXT: addss %xmm3, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: addss %xmm2, %xmm0
+; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
;
; SSE41-LABEL: test_v8f32_zero:
; SSE41: # %bb.0:
-; SSE41-NEXT: xorps %xmm2, %xmm2
+; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm2
-; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE41-NEXT: movaps %xmm0, %xmm3
+; SSE41-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE41-NEXT: addss %xmm2, %xmm3
-; SSE41-NEXT: movaps %xmm0, %xmm2
-; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE41-NEXT: addss %xmm3, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: addss %xmm2, %xmm0
+; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: test_v8f32_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v8f32_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v8f32_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vaddss %xmm0, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v8f32_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
+; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vaddss %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX2-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call float @llvm.vector.reduce.fadd.f32.v8f32(float 0.0, <8 x float> %a0)
+ %1 = call float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %a0)
ret float %1
}
define float @test_v16f32_zero(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_zero:
; SSE2: # %bb.0:
-; SSE2-NEXT: xorps %xmm4, %xmm4
+; SSE2-NEXT: movaps %xmm0, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[1,1]
+; SSE2-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE2-NEXT: addss %xmm4, %xmm5
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
-; SSE2-NEXT: addss %xmm5, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE2-NEXT: addss %xmm4, %xmm0
+; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
;
; SSE41-LABEL: test_v16f32_zero:
; SSE41: # %bb.0:
-; SSE41-NEXT: xorps %xmm4, %xmm4
+; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm4
-; SSE41-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; SSE41-NEXT: movaps %xmm0, %xmm5
+; SSE41-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE41-NEXT: addss %xmm4, %xmm5
-; SSE41-NEXT: movaps %xmm0, %xmm4
-; SSE41-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
-; SSE41-NEXT: addss %xmm5, %xmm4
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; SSE41-NEXT: addss %xmm4, %xmm0
+; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: retq
;
-; AVX-LABEL: test_v16f32_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm2
-; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
-; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm2
-; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v16f32_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm2
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v16f32_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm2
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v16f32_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm2
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vaddss %xmm0, %xmm2, %xmm2
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX2-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a0)
+ %1 = call float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a0)
ret float %1
}
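+; The f64 reductions below follow the same pattern as the f32 ones, using
+; addsd/vaddsd with unpckhpd/vpermilpd lane extracts.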
define double @test_v2f64_zero(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_zero:
; SSE: # %bb.0:
-; SSE-NEXT: xorpd %xmm1, %xmm1
+; SSE-NEXT: movapd %xmm0, %xmm1
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
-; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: test_v2f64_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v2f64_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v2f64_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v2f64_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f64_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
- %1 = call double @llvm.vector.reduce.fadd.f64.v2f64(double 0.0, <2 x double> %a0)
+ %1 = call double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %a0)
ret double %1
}
define double @test_v4f64_zero(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_zero:
; SSE: # %bb.0:
-; SSE-NEXT: xorpd %xmm2, %xmm2
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm2
-; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: addsd %xmm2, %xmm0
-; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
-; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: addsd %xmm1, %xmm2
+; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: test_v4f64_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm2, %xmm1, %xmm1
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v4f64_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm1
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v4f64_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm1
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v4f64_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm1
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call double @llvm.vector.reduce.fadd.f64.v4f64(double 0.0, <4 x double> %a0)
+ %1 = call double @llvm.vector.reduce.fadd.f64.v4f64(double -0.0, <4 x double> %a0)
ret double %1
}
define double @test_v8f64_zero(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_zero:
; SSE: # %bb.0:
-; SSE-NEXT: xorpd %xmm4, %xmm4
+; SSE-NEXT: movapd %xmm0, %xmm4
+; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm4
-; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
-; SSE-NEXT: addsd %xmm4, %xmm0
-; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: addsd %xmm1, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
-; SSE-NEXT: addsd %xmm1, %xmm0
-; SSE-NEXT: addsd %xmm2, %xmm0
+; SSE-NEXT: addsd %xmm1, %xmm4
+; SSE-NEXT: addsd %xmm2, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
-; SSE-NEXT: addsd %xmm2, %xmm0
-; SSE-NEXT: addsd %xmm3, %xmm0
+; SSE-NEXT: addsd %xmm2, %xmm4
+; SSE-NEXT: addsd %xmm3, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
-; SSE-NEXT: addsd %xmm3, %xmm0
+; SSE-NEXT: addsd %xmm3, %xmm4
+; SSE-NEXT: movapd %xmm4, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: test_v8f64_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v8f64_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm0, %xmm2
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm2, %xmm0
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v8f64_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm2
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm2, %xmm2
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm2, %xmm0
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v8f64_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX2-NEXT: vaddsd %xmm2, %xmm0, %xmm2
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vaddsd %xmm0, %xmm2, %xmm2
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX2-NEXT: vaddsd %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX2-NEXT: vaddsd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call double @llvm.vector.reduce.fadd.f64.v8f64(double 0.0, <8 x double> %a0)
+ %1 = call double @llvm.vector.reduce.fadd.f64.v8f64(double -0.0, <8 x double> %a0)
ret double %1
}
define double @test_v16f64_zero(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_zero:
; SSE: # %bb.0:
-; SSE-NEXT: xorpd %xmm8, %xmm8
-; SSE-NEXT: addsd %xmm0, %xmm8
+; SSE-NEXT: movapd %xmm0, %xmm8
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm8, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: test_v16f64_zero:
-; AVX: # %bb.0:
-; AVX-NEXT: vxorpd %xmm4, %xmm4, %xmm4
-; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm4
-; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm5, %xmm4, %xmm4
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm4
-; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm0
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
-; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
-; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; AVX1-SLOW-LABEL: test_v16f64_zero:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm4, %xmm0, %xmm4
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm4, %xmm4
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm4, %xmm0
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm4, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vaddsd %xmm3, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm1
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test_v16f64_zero:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm4
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm4, %xmm4
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm4, %xmm0
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm4, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vaddsd %xmm2, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vaddsd %xmm3, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vextractf128 $1, %ymm3, %xmm1
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-LABEL: test_v16f64_zero:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX2-NEXT: vaddsd %xmm4, %xmm0, %xmm4
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX2-NEXT: vaddsd %xmm0, %xmm4, %xmm4
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX2-NEXT: vaddsd %xmm0, %xmm4, %xmm0
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX2-NEXT: vaddsd %xmm4, %xmm0, %xmm0
+; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vaddsd %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vaddsd %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vextractf128 $1, %ymm3, %xmm1
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f64_zero:
; AVX512: # %bb.0:
-; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm2
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
-; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
- %1 = call double @llvm.vector.reduce.fadd.f64.v16f64(double 0.0, <16 x double> %a0)
+ %1 = call double @llvm.vector.reduce.fadd.f64.v16f64(double -0.0, <16 x double> %a0)
ret double %1
}