;
; SSSE3-FAST-LABEL: pair_sum_v4i32_v4i32:
; SSSE3-FAST: # %bb.0:
-; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm0
+; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: phaddd %xmm2, %xmm0
; SSSE3-FAST-NEXT: retq
;
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-SLOW-NEXT: retq
;
-; AVX-FAST-LABEL: pair_sum_v4i32_v4i32:
-; AVX-FAST: # %bb.0:
-; AVX-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm2
-; AVX-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX-FAST-NEXT: vphaddd %xmm2, %xmm0, %xmm0
-; AVX-FAST-NEXT: retq
+; AVX1-FAST-LABEL: pair_sum_v4i32_v4i32:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm1
+; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT: retq
;
; AVX2-SLOW-LABEL: pair_sum_v4i32_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: pair_sum_v4i32_v4i32:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm2
+; AVX2-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm0, %xmm0
+; AVX2-FAST-NEXT: retq
%5 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
%6 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 1, i32 3>
%7 = add <2 x i32> %5, %6
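; Note: de-interleaving the even and odd lanes and adding them yields the
; adjacent-pair sums, which the fast-hops (-FAST) runs above match to a
; single (v)phaddd per pair of inputs.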
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm1
-; AVX2-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm4
+; AVX2-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm1
+; AVX2-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm4
+; AVX2-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm5
; AVX2-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm2
-; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,3]
-; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
-; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
-; AVX2-FAST-NEXT: vpaddd %xmm1, %xmm3, %xmm1
-; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT: vphaddd %xmm7, %xmm6, %xmm1
-; AVX2-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm2
-; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm4[0,3]
+; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[0]
+; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[1,3]
+; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[3]
+; AVX2-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX2-FAST-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT: vphaddd %xmm7, %xmm6, %xmm2
+; AVX2-FAST-NEXT: vphaddd %xmm1, %xmm2, %xmm1
; AVX2-FAST-NEXT: vpbroadcastq %xmm1, %ymm1
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX2-FAST-NEXT: retq
; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
; SSSE3-FAST-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSSE3-FAST-NEXT: addps %xmm3, %xmm1
-; SSSE3-FAST-NEXT: haddps %xmm0, %xmm1
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[2,0]
+; SSSE3-FAST-NEXT: haddps %xmm1, %xmm0
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[0,2]
; SSSE3-FAST-NEXT: movaps %xmm4, %xmm0
; SSSE3-FAST-NEXT: retq
;
; AVX-FAST-NEXT: vaddps %xmm1, %xmm2, %xmm1
; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
; AVX-FAST-NEXT: vaddps %xmm2, %xmm3, %xmm2
-; AVX-FAST-NEXT: vhaddps %xmm1, %xmm2, %xmm1
-; AVX-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,0]
+; AVX-FAST-NEXT: vhaddps %xmm2, %xmm1, %xmm1
+; AVX-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX-FAST-NEXT: retq
%5 = call reassoc float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %0)
%6 = call reassoc float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %1)
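; Note: the reassoc flag on these fadd reductions lets the backend
; reassociate them, so the -FAST runs combine the per-vector reductions
; with (v)haddps instead of scalarizing.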
; AVX-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX-SLOW-NEXT: retq
;
-; AVX1-FAST-LABEL: reduction_sum_v4i32_v4i32:
-; AVX1-FAST: # %bb.0:
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX1-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
-; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-FAST-NEXT: retq
-;
-; AVX2-FAST-LABEL: reduction_sum_v4i32_v4i32:
-; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX2-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX2-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
-; AVX2-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX2-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX2-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
-; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-FAST-NEXT: retq
+; AVX-FAST-LABEL: reduction_sum_v4i32_v4i32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
+; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; AVX-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
+; AVX-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-FAST-NEXT: retq
%5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)
%6 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %1)
%7 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %2)
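; Note: integer add reductions are freely reassociable, so the AVX-FAST run
; above reduces each input with vpshufd/vpaddd plus vphaddd and packs the
; scalar results into one vector.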